diff --git a/.hugo_build.lock b/.hugo_build.lock new file mode 100644 index 000000000..e69de29bb diff --git a/KubeSphere Documentation Style Guide.md b/KubeSphere Documentation Style Guide.md index 0dcbcdf21..0ab96feaa 100644 --- a/KubeSphere Documentation Style Guide.md +++ b/KubeSphere Documentation Style Guide.md @@ -44,7 +44,7 @@ Give a title first before you write a paragraph. It can be grouped into differen - When you submit your md files to GitHub, make sure you add related image files that appear in md files in the pull request as well. Please save your image files in static/images/docs. You can create a folder in the directory to save your images. - If you want to add remarks (for example, put a box on a UI button), use the color **green**. As some screenshot apps does not support the color picking function for a specific color code, as long as the color is **similar** to #09F709, #00FF00, #09F709 or #09F738, it is acceptable. - Image format: PNG. -- Make sure images in your guide match the content. For example, you mention that users need to log in to KubeSphere using an account of a role; this means the account that displays in your image is expected to be the one you are talking about. It confuses your readers if the content you are describing is not consistent with the image used. +- Make sure images in your guide match the content. For example, you mention that users need to log in to KubeSphere using a user of a role; this means the user that displays in your image is expected to be the one you are talking about. It confuses your readers if the content you are describing is not consistent with the image used. - Recommended: [Xnip](https://xnipapp.com/) for Mac and [Sniptool](https://www.reasyze.com/sniptool/) for Windows. @@ -184,7 +184,7 @@ When describing the UI, you can use the following prepositions. 
```bash # Assume your original Kubernetes cluster is v1.17.9 - ./kk create config --with-kubesphere --with-kubernetes v1.17.9 + ./kk create config --with-kubesphere --with-kubernetes v1.20.4 ``` - If the comment is used for all the code (for example, serving as a header for explanations), put the comment at the beginning above the code. For example: diff --git a/OWNERS b/OWNERS index 36c088343..bb20729bb 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,10 @@ approvers: + - Felixnoo #oncall - Patrick-LuoYu #oncall - zryfish - rayzhou2017 + - faweizhao26 + - yangchuansheng - FeynmanZhou reviewers: diff --git a/assets/scss/conferences.scss b/assets/scss/conferences.scss index dba0cb5c5..9d158f91e 100644 --- a/assets/scss/conferences.scss +++ b/assets/scss/conferences.scss @@ -15,18 +15,6 @@ & > ul { - li:nth-child(1) { - .top-div { - background-image: linear-gradient(270deg, rgb(101, 193, 148), rgb(76, 169, 134)) - } - } - - li:nth-child(2) { - .top-div { - background-image: linear-gradient(to left, rgb(52, 197, 209), rgb(95, 182, 216)) - } - } - & > li { .top-div { position: relative; diff --git a/assets/scss/content.scss b/assets/scss/content.scss index a6ea4e31a..604318b3a 100644 --- a/assets/scss/content.scss +++ b/assets/scss/content.scss @@ -6,7 +6,7 @@ } .main-section { - & > div { + &>div { position: relative; padding-top: 93px; @@ -50,7 +50,7 @@ h1 { margin-top: 20px; margin-bottom: 40px; - text-shadow: 0 8px 16px rgba(35,45,65,.1); + text-shadow: 0 8px 16px rgba(35, 45, 65, .1); font-size: 40px; font-weight: 500; line-height: 1.4; @@ -80,7 +80,7 @@ line-height: 2.29; color: #36435c; } - + .md-body h2 { font-weight: 500; line-height: 64px; @@ -90,13 +90,13 @@ margin-bottom: 20px; border-bottom: 1px solid #ccd3db; } - + .md-body h3 { font-weight: 600; line-height: 1.5; color: #171c34; } - + .md-body img { max-width: 100%; box-sizing: content-box; @@ -104,30 +104,30 @@ border-radius: 5px; box-shadow: none; } - + .md-body blockquote { padding: 4px 20px 4px 12px; 
border-radius: 4px; background-color: #ecf0f2; } - + &-metadata { margin-bottom: 28px; - + &-title { font-size: 16px; font-weight: 500; line-height: 1.5; color: #171c34; } - + &-time { font-size: 14px; line-height: 1.43; color: #919aa3; } } - + &-title { text-shadow: 0 8px 16px rgba(35, 45, 65, 0.1); font-size: 40px; @@ -135,7 +135,7 @@ line-height: 1.4; color: #171c34; margin-bottom: 40px; - + @media only screen and (max-width: $mobile-max-width) { font-size: 28px; } @@ -150,6 +150,7 @@ bottom: 10px; transform: translateX(350px); width: 230px; + @media only screen and (max-width: $mobile-max-width) { display: none; } @@ -158,7 +159,7 @@ max-height: 100%; position: relative; overflow-y: auto; - } + } .title { height: 32px; @@ -166,13 +167,14 @@ line-height: 1.33; color: #36435c; padding-bottom: 10px; - border-bottom: solid 1px #ccd3db;; + border-bottom: solid 1px #ccd3db; } .tabs { - #TableOfContents > ul > li > a { + #TableOfContents>ul>li>a { font-weight: 500; } + li { margin: 10px 0; font-size: 16px; @@ -195,6 +197,7 @@ color: #55bc8a; } } + li li { padding-left: 20px; } @@ -202,121 +205,137 @@ } } } -.SubscribeForm { - position: fixed; - right: 49px; - bottom: 32px; - box-shadow: 0px 8px 16px rgba(36, 46, 66, 0.05), 0px 4px 8px rgba(36, 46, 66, 0.06); - .innerBox { - width: 440px; - height: 246px; - overflow: hidden; - background: url('/images/home/modal-noText.svg'); - position: relative; - padding: -8px -16px; - background-position: -16px -8px; +@media only screen and (min-width: $mobile-max-width) { + .SubscribeForm { + position: fixed; + right: 49px; + bottom: 32px; - .close { - position: absolute; - top: 24px; - right: 24px; - cursor: pointer; - } + box-shadow: 0px 8px 16px rgba(36, 46, 66, 0.05), + 0px 4px 8px rgba(36, 46, 66, 0.06); - p { - width: 360px; - height: 44px; - left: 40px; - top: 103px; - right: 40px; - position: absolute; - font-family: ProximaNova; - font-size: 16px; - line-height: 22px; - color: #919AA3; + .innerBox { + width: 440px; + 
height: 246px; + overflow: hidden; + background: url('/images/home/modal-noText.svg'); + position: relative; + padding: -8px -16px; + background-position: -16px -8px; - } - - div { - bottom: 32px; - left: 40px; - position: absolute; - width: 358px; - height: 48px; - margin-top: 20px; - border-radius: 24px; - border: solid 1px #ccd3db; - background-color: #f5f8f9; - - @mixin placeholder { - font-family: PingFangSC; - font-size: 14px; - line-height: 16px; - text-align: right; - color: #CCD3DB; + .close { + position: absolute; + top: 24px; + right: 24px; + cursor: pointer; } - input { - width: 207px; - height: 20px; - font-size: 14px; - margin-left: 16px; - color: #ccd3db; - border: none; - outline: none; + p { + width: 360px; + height: 44px; + left: 40px; + top: 103px; + right: 40px; + position: absolute; + font-family: ProximaNova; + font-size: 16px; + line-height: 22px; + color: #919AA3; + + } + + div { + bottom: 32px; + left: 40px; + position: absolute; + width: 358px; + height: 48px; + margin-top: 20px; + border-radius: 24px; + border: solid 1px #ccd3db; background-color: #f5f8f9; - &:-webkit-input-placeholder { - @include placeholder(); + @mixin placeholder { + font-family: PingFangSC; + font-size: 14px; + line-height: 16px; + text-align: right; + color: #CCD3DB; } - &:-ms-input-placeholder { - @include placeholder(); - } - - &:-moz-placeholder { - @include placeholder(); - } - - &:-moz-placeholder { - @include placeholder(); - } - } - - button { - width: 111px; - height: 40px; - margin: 4px 5px 4px 14px; - border-radius: 20px; - border: none; - font-size: 14px; - color: #ffffff; - cursor: pointer; - box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2); - background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a); - - &:hover { - box-shadow: none; - } - } - - @media only screen and (max-width: $mobile-max-width) { - width: 326px; - input { - width: 196px; + 
width: 207px; + height: 20px; + font-size: 14px; + margin-left: 16px; + color: #ccd3db; + border: none; + outline: none; + background-color: #f5f8f9; + + &:-webkit-input-placeholder { + @include placeholder(); + } + + &:-ms-input-placeholder { + @include placeholder(); + } + + &:-moz-placeholder { + @include placeholder(); + } + + &:-moz-placeholder { + @include placeholder(); + } } button { - width: 90px; + width: 111px; + height: 40px; + margin: 4px 5px 4px 14px; + border-radius: 20px; + border: none; + font-size: 14px; + color: #ffffff; + cursor: pointer; + box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2); + background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a); + + &:hover { + box-shadow: none; + } + } + + @media only screen and (max-width: $mobile-max-width) { + width: 326px; + + input { + width: 196px; + } + + button { + width: 90px; + } + } + + span { + color: red; } } - span { - color: red; - } } - } +} + +@media only screen and (max-width: $mobile-max-width) { + + .SubscribeForm { + display: none !important; + } +} + +#videoPlayer { + width: 100%; } \ No newline at end of file diff --git a/assets/scss/doc.scss b/assets/scss/doc.scss index fb1b59855..d560114b6 100644 --- a/assets/scss/doc.scss +++ b/assets/scss/doc.scss @@ -853,7 +853,7 @@ footer { p { width: 360px; - font-family: ProximaNova; + font-family: 'Proxima Nova'; font-size: 16px; line-height: 22px; color: #919AA3; diff --git a/assets/scss/learn-page.scss b/assets/scss/learn-page.scss index 4fdcdc513..856dae9e3 100644 --- a/assets/scss/learn-page.scss +++ b/assets/scss/learn-page.scss @@ -1,5 +1,40 @@ @import "variables"; +@mixin tooltip { + .tooltip { + visibility: hidden; + width: 80px; + padding: 8px 12px; + background: #242E42; + box-shadow: 0px 4px 8px rgba(36, 46, 66, 0.2); + border-radius: 4px; + transform: translateX(-50%); + box-sizing: border-box; + /* 定位 */ + position: 
absolute; + z-index: 1; + + font-family: PingFang SC; + font-style: normal; + font-size: 12px; + line-height: 20px; + color: #fff; + text-align: center; + + &::after { + content: " "; + position: absolute; + top: 100%; + /* 提示工具底部 */ + left: 50%; + margin-left: -5px; + border-width: 5px; + border-style: solid; + border-color: #242E42 transparent transparent; + } + } +} + .navigation { box-shadow: 0 4px 8px 0 rgba(36, 46, 66, 0.06), 0 8px 16px 0 rgba(36, 46, 66, 0.05); background-image: linear-gradient(to bottom, rgba(134, 219, 162, 0.9), rgba(0, 170, 114, 0.9)); @@ -88,7 +123,8 @@ .right { box-sizing: border-box; width: 368px; - padding: 24px; + padding: 24px 20px 0 20px; + min-height: 488px; max-height: 600px; margin-left: 15px; overflow: auto; @@ -98,163 +134,200 @@ display: none; } - .lesson-div { - margin-top: 20px; + .sections { + display: flex; + flex-direction: column; + align-items: center; + margin-bottom: 12px; - &:first-child { - margin-top: 0; - } - - & > p { + .sectionFolder { + box-sizing: border-box; + width: 328px; + display: flex; + align-items: center; + flex-direction: row; + padding: 9px 16px; + background: #F9FBFD; + border-radius: 4px; position: relative; - padding-left: 9px; - font-size: 16px; - font-weight: 500; - line-height: 1.5; - letter-spacing: -0.04px; - &::before { - position: absolute; - top: 10px; - left: 0; - content: ""; - width: 4px; - height: 4px; - border-radius: 50%; - background-color: #36435c; + &:hover { + cursor: pointer; + background: #EFF4F9; } - a { - color: #36435c; - &:hover { - color: #55bc8a; - } + .text { + font-weight: 500; + font-size: 16px; + line-height: 22px; + width: 264px; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; } .icon { - display: inline-block; - margin-left: 6px; - width: 12px; - height: 12px; - background-image: url("/images/learn/video.svg"); - } - - .play-span { - display: none; - height: 12px; - font-size: 0; - span { - display: inline-block; - width: 2px; - height: 100%; 
- margin-right: 2px; - background-color: #55bc8a; - } - } - - .playing { - display: inline-block; - span { - animation-name: playing; - animation-duration: 1s; - animation-timing-function: ease; - animation-delay: 0s; - animation-iteration-count: infinite; - - &:first-child { - animation-delay: 0.3s; - } - &:last-child { - animation-delay: 0.5s; - } - } + display: block; + height: 10px; + width: 10px; + background-image: url('/images/learn/icon-setion-close.svg'); + background-repeat: no-repeat; + position: absolute; + right: 17px; } } - & > p.active { - a { - color: #55bc8a; - } - &::before { - background-color: #55bc8a; - } - } - } - - .lesson-link-div { - margin-top: 10px; - display: flex; - a { - display: block; - box-sizing: border-box; - width: 100px; - height: 72px; - padding: 11px 20px 10px; - margin-left: 10px; - margin-right: 0; - font-size: 14px; - line-height: 24px; - text-align: center; - color: #8f94a1; - border-radius: 4px; - background-color: #f5f9fa; - border: solid 1px transparent; - - &:first-child { - margin-left: 0; - } - - &:hover { - border: solid 1px #4ca986; - } - - span { - display: inline-block; - width: 24px; - height: 24px; - } - } - .active { - color: #00a971; - border: solid 1px #55bc8a; - background-color: #cdf6d5; - } + background: linear-gradient(180deg, #242E42 0%, #36435C 100%) !important; + color: #ffffff; - .lesson { - span { - background-image: url("/images/learn/icon-image.svg"); + &>.icon { + background-image: url('/images/learn/icon-setion-open.svg'); } } - .lesson.active { - span { - background-image: url("/images/learn/icon-image-active.svg"); + ul { + transition: 1.2s; + + li { + width: 320px; + height: 24px; + margin: 16px 0px; + list-style: none; + display: flex; + align-items: center; + position: relative; + cursor: pointer; + + .textLink { + width: 252px; + display: flex; + align-items: center; + + .videoIcon { + display: block; + width: 12px; + height: 12px; + margin-right: 8px; + background-image: 
url('/images/learn/lesson-video.svg'); + } + + .text { + flex: 1; + font-family: PingFang SC; + font-style: normal; + font-weight: normal; + font-size: 14px; + line-height: 24px; + display: block; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + } + } + + .actions { + width: 68px; + height: 24px; + display: flex; + flex-direction: row; + align-items: center; + justify-content: flex-end; + + .picture { + width: 16px; + height: 12px; + background-image: url('/images/learn/actions-picture.svg'); + background-repeat: no-repeat; + position: relative; + + @include tooltip(); + + &:hover { + background-image: url('/images/learn/actions-picture-active.svg'); + + .tooltip { + visibility: visible; + bottom: 20px; + left: 8px; + } + } + } + + .activePicture { + background-image: url('/images/learn/actions-picture-open.svg') !important; + } + + .ppt { + width: 16px; + height: 16px; + background-image: url('/images/learn/actions-ppt.svg'); + background-repeat: no-repeat; + position: relative; + margin: 0 10px; + @include tooltip(); + + &:hover { + .tooltip { + visibility: visible; + bottom: 20px; + left: 8px; + } + } + } + + .download { + width: 16px; + height: 16px; + background-image: url('/images/learn/actions-download.svg'); + background-repeat: no-repeat; + position: relative; + @include tooltip(); + + &:hover { + .tooltip { + visibility: visible; + bottom: 20px; + left: 8px; + } + } + } + } + + &:hover { + .textLink { + .videoIcon { + background-image: url('/images/learn/lesson-video-hover.svg'); + } + } + + .text { + color: #4CA986; + } + } + } + + .pptActive{ + .text { + color: #4CA986; + } + } + + .activeLine { + .textLink { + .videoIcon { + background-image: url('/images/learn/lesson-video-play.svg') !important; + } + + .text { + color: #4CA986; + } + } } } - .courseware { - span { - background-image: url("/images/learn/icon-ppt.svg"); - } - } - - .courseware.active { - span { - background-image: url("/images/learn/icon-ppt-active.svg"); - } - } 
- - .examination { - span { - background-image: url("/images/learn/icon-download.svg"); - } - } - - .examination.active { - span { - background-image: url("/images/learn/icon-download-active.svg"); - } + .hideLesson { + display: none; } } } diff --git a/assets/scss/learn.scss b/assets/scss/learn.scss index 6e05c8998..5f39cfe07 100644 --- a/assets/scss/learn.scss +++ b/assets/scss/learn.scss @@ -144,14 +144,15 @@ h2 { font-size: 0; overflow-x: auto; white-space: nowrap; + display: flex; li { position: relative; display: inline-block; box-sizing: border-box; white-space: normal; - width: 323px; - height: 237px; + min-width: 323px; + min-height: 237px; padding: 30px 20px 30px 62px; margin-left: 70px; font-size: 14px; @@ -169,6 +170,7 @@ h2 { left: -50px; width: 100px; height: 100px; + object-fit: cover; border-radius: 50%; } @@ -250,7 +252,7 @@ h2 { margin-top: 68px; & > li { position: relative; - padding: 50px 39px 20px 40px; + padding: 50px 39px 40px 40px; margin-bottom: 58px; border-radius: 8px; background-color: #ffffff; @@ -276,6 +278,10 @@ h2 { top: -20px; left: 30px; border-radius: 5px; + white-space: nowrap; + text-overflow: ellipsis; + max-width: 75%; + overflow: hidden; } } @@ -371,6 +377,46 @@ h2 { } } } + + .button{ + position: absolute; + height: 48px; + width: 100%; + bottom: 0; + left: 0; + background: linear-gradient(360deg, rgba(85, 188, 138, 0.25) 0%, rgba(85, 188, 138, 0) 100%); + border: none; + font-weight: 600; + font-size: 14px; + line-height: 20px; + color: #0F8049; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + + #close{ + display: none; + } + } + + .hideButton{ + display: none; + } + + .active{ + #open{ + display: none; + } + + #close{ + display: block; + } + + svg{ + transform: rotateX(180deg); + } + } } } } diff --git a/assets/scss/live.scss b/assets/scss/live.scss index e3611b66c..a94fd87f0 100644 --- a/assets/scss/live.scss +++ b/assets/scss/live.scss @@ -1,5 +1,6 @@ @import 'variables'; @import 
'mixin'; + .btn-a { display: inline-block; padding: 0 53px; @@ -10,10 +11,12 @@ color: #ffffff; box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2), 0 10px 50px 0 rgba(34, 43, 62, 0.1); background-image: linear-gradient(to bottom, rgba(85, 188, 138, 0), rgba(85, 188, 138, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a); + &:hover { box-shadow: none; } } + .section-1 { position: relative; padding-top: 124px; @@ -40,16 +43,19 @@ position: relative; width: 840px; height: 400px; + @media only screen and (max-width: $mobile-max-width) { width: 100%; height: auto; } + img { width: 100%; height: 100%; min-height: 200px; object-fit: cover; } + button { position: absolute; right: 20px; @@ -62,6 +68,7 @@ cursor: pointer; box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2), 0 10px 50px 0 rgba(34, 43, 62, 0.1); background-image: linear-gradient(to bottom, rgba(85, 188, 138, 0), rgba(85, 188, 138, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a); + &:hover { box-shadow: none; } @@ -73,12 +80,14 @@ width: 320px; height: 400px; padding: 10px; + @media only screen and (max-width: $mobile-max-width) { max-width: 320px; width: auto; height: auto; margin: 0 auto; } + h2 { margin-bottom: 10px; font-size: 18px; @@ -116,8 +125,8 @@ font-size: 16px; line-height: 28px; letter-spacing: -0.04px; - color: #919aa3; - + color: #919aa3; + img { vertical-align: middle; margin-right: 4px; @@ -127,8 +136,8 @@ a { margin: 34px auto 0; height: 40px; - padding: 0 28px; - line-height: 40px; + padding: 0 28px; + line-height: 40px; } .tag { @@ -165,12 +174,12 @@ padding-bottom: 40px; } - & > div { + &>div { - & > .video-tab-ul { + &>.video-tab-ul { padding: 0 34px; border-radius: 5px; - box-shadow: 0 4px 16px 0 rgba(7,42,68,.1); + box-shadow: 0 4px 16px 0 rgba(7, 42, 68, .1); background-color: #fff; li { @@ -188,18 +197,19 @@ text-align: center; &:hover { - box-shadow: 0 8px 16px 0 rgba(101,193,148,.2),0 0 50px 0 
rgba(101,193,148,.1); + box-shadow: 0 8px 16px 0 rgba(101, 193, 148, .2), 0 0 50px 0 rgba(101, 193, 148, .1); background-color: #55bc8a; color: #fff; } } + .active { - box-shadow: 0 8px 16px 0 rgba(101,193,148,.2),0 0 50px 0 rgba(101,193,148,.1); + box-shadow: 0 8px 16px 0 rgba(101, 193, 148, .2), 0 0 50px 0 rgba(101, 193, 148, .1); background-color: #55bc8a; color: #fff; } - li + li { + li+li { margin-left: 12px; } } @@ -207,11 +217,12 @@ .video-ul { margin-top: 20px; font-size: 0; + @media only screen and (max-width: $mobile-max-width) { text-align: center; } - & > li { + &>li { position: relative; display: inline-block; width: 360px; @@ -225,18 +236,18 @@ text-align: left; cursor: pointer; - & > img { + &>img { width: 100%; height: 100%; } &:hover { - & > div { + &>div { height: 202px; } } - & > div { + &>div { position: absolute; left: 0; right: 0; @@ -247,14 +258,14 @@ transition: all .2s ease-in-out; overflow: hidden; - & > .btn { + &>.btn { position: absolute; left: 50%; bottom: 120px; transform: translateX(-50%); } - & > div { + &>div { position: absolute; left: 0; right: 0; @@ -269,7 +280,7 @@ color: #fff; padding: 8px 0; margin-bottom: 6px; - border-bottom: 1px solid hsla(0,0%,100%,.1); + border-bottom: 1px solid hsla(0, 0%, 100%, .1); text-overflow: ellipsis; white-space: nowrap; overflow: hidden; @@ -306,7 +317,7 @@ } } - & > div { + &>div { margin-top: 20px; text-align: center; @@ -342,7 +353,7 @@ padding: 0; border-radius: 0; font-size: 0; - + .video-div { height: 100%; } @@ -363,7 +374,7 @@ width: 100%; max-width: 100%; height: auto; - + iframe { width: 100%; height: 300px; @@ -371,99 +382,270 @@ } } -.section-4 { - background-image: linear-gradient(113deg, #4a499a 27%, #8552c3 81%); - .common-layout { - white-space: nowrap; - overflow: auto; - & > div { - box-sizing: border-box; - display: inline-block; - vertical-align: top; - white-space: normal; - width: 140px; - height: 225px; - margin: 80px 40px; - padding-top: 20px; - border-top: 1px solid 
#a1b3c4; - .time-div { +.common-layout-special { + white-space: nowrap; + overflow: auto; + height: 535px; + background-image: linear-gradient(147.87deg, #4A499A 16%, #8552C3 85.01%); + + @media only screen and (max-width: $mobile-max-width) { + width: 100%; + height: auto; + } + + .meetup-box { + max-width: 1320px; + margin: 0 auto; + + @media only screen and (max-width: $mobile-max-width) { + width: 100%; + } + + .meetup-title { + font-size: 32px; + line-height: 45px; + color: #fff; + text-align: center; + padding-top: 56px; + } + + .innerBox { + padding: 0px 80px; + position: relative; + + @media only screen and (max-width: $mobile-max-width) { + width: 100%; + box-sizing: border-box; + padding: 0 20px; + } + + &>ul { + margin-top: 16px; display: flex; + overflow: scroll; + background: linear-gradient(to top, rgba(255, 255, 255, 0.08) 5%, transparent 5%) no-repeat; - .right { - margin-left: 4px; - font-weight: bold; - line-height: 1; - color: #ffffff; - .date { - margin-bottom: 4px; - font-size: 24px; - } - .time { - font-size: 14px; - } + li { + width: 80px; + height: 40px; + line-height: 24px; + color: rgba(255, 255, 255, 0.15); + display: flex; + justify-content: center; + align-items: center; + flex-shrink: 0; + box-sizing: border-box; + } + + .tab_active { + color: rgba(255, 255, 255, 0.7); + border-bottom: solid 2px rgba(255, 255, 255, 0.7); } } - h3 { - height: 60px; - margin: 21px 0 47px; - font-size: 14px; - font-weight: 500; - line-height: 1.43; - color: #d5dee7; - a { - color: #d5dee7; + .yearBox { + display: flex; + align-items: center; + min-height: 377px; + + .hiddenUl { + display: none; + } + + .autoMeetUp { + width: 100%; + height: 260px; + margin: 0 auto; + display: flex; + position: relative; + + .swiper-slide { + display: flex; + overflow: hidden; + flex-direction: column; + } + + li { + height: 258px; + + .imgBox { + position: relative; + display: flex; + align-items: center; + flex-direction: column; + + p { + text-align: center; + 
font-weight: 500; + font-size: 20px; + line-height: 28px; + color: #FFFFFF; + } + + img { + margin-top: 20px; + width: 373px; + height: 210px; + } + + .button { + position: absolute; + right: 10px; + bottom: 10px; + padding: 10px 20px; + border-radius: 28px; + font-size: 16px; + color: #fff; + border: none; + cursor: pointer; + box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2), 0 10px 50px 0 rgba(34, 43, 62, 0.1); + background-image: linear-gradient(to bottom, rgba(85, 188, 138, 0), rgba(85, 188, 138, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a); + + &:hover { + box-shadow: none; + } + } + } + + } + + @media only screen and (max-width: 375px) { + .swiper-slide { + display: flex; + flex-direction: column; + align-items: center; + } + + li { + + &:nth-child(2) { + margin: 0 0; + } + + img { + width: 100% !important; + } + } + } + + @media only screen and (min-width: 376px) and (max-width: $mobile-max-width) { + + .swiper-slide { + display: flex; + flex-direction: column; + align-items: center; + } + + li { + &:nth-child(2) { + margin: 0 0; + } + + img { + width: 373px !important; + } + + .button { + right: 15px !important; + } + } + } + + @media only screen and (min-width: 769px) and (max-width: 1160px) { + + li { + flex-direction: column; + align-items: center; + + img { + width: 320px !important; + height: 180px !important; + } + + .button { + right: 10px; + bottom: 40px; + } + } + } + } + + .prev-button { + display: block; + background: url('/images/live/arrow.svg'); + width: 40px; + height: 40px; + position: absolute; + bottom: 153.5px; + left: 0px; + transform: rotate(180deg); + cursor: pointer; + + @media only screen and (max-width: 767px) { + display: none; + } + + @media only screen and (min-width: $mobile-max-width) and (max-width: 1160px) { + left: 20px; + z-index: 200; + } &:hover { - color: #008a5c; + background: url('/images/live/arrow-hover.svg'); } } - } - button { - font-size: 12px; - font-weight: 600; - 
line-height: 2; - border: none; - padding: 5px 28px; - border-radius: 17px; - cursor: pointer; - box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2); - &:hover { - box-shadow: none; + .next-button { + display: block; + background: url('/images/live/arrow.svg'); + width: 40px; + height: 40px; + position: absolute; + bottom: 153.5px; + right: 0px; + cursor: pointer; + + @media only screen and (max-width: 767px) { + display: none; + } + + @media only screen and (min-width: $mobile-max-width) and (max-width: 1160px) { + right: 20px; + z-index: 200; + } + + &:hover { + background: url('/images/live/arrow-hover.svg'); + } + } } - - .over-btn { - color: #ffffff; - background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #242e42, #242e42); - } - - .notive-btn { - color: #3d3e49; - background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #ffffff, #ffffff); - } } } } + .section-5 { .common-layout { position: relative; padding-top: 100px; padding-left: 60px; padding-bottom: 30px; + @media only screen and (max-width: $mobile-max-width) { padding-left: 20px; } + .left-div { position: relative; width: 600px; + @media only screen and (max-width: $mobile-max-width) { width: 100%; z-index: 2; } + h2 { font-size: 32px; font-weight: 600; @@ -485,13 +667,28 @@ } } - & > img { + &>img { position: absolute; top: 88px; right: 0; + @media only screen and (max-width: $mobile-max-width) { opacity: 0.3; } } } +} + +.my-bullet-active { + background: #55bc8a; + opacity: 1; +} + +.swiper-horizontal>.swiper-pagination-bullets, +.swiper-pagination-bullets.swiper-pagination-horizontal, +.swiper-pagination-custom, +.swiper-pagination-fraction { + bottom: 5px; + left: 0; + width: 100%; } \ No newline at end of file diff --git a/assets/scss/mixin.scss b/assets/scss/mixin.scss index 136ddc93b..813efff29 100644 --- a/assets/scss/mixin.scss +++ 
b/assets/scss/mixin.scss @@ -180,3 +180,19 @@ padding-top: 20px; } } + +@mixin common-layout-special { + position: relative; + width: 1300px; + margin: 0 auto; + padding-left: 260px; + + @media only screen and (max-width: $width-01) { + width: 100%; + } + + @media only screen and (max-width: $width-02) { + padding: 10px; + padding-top: 20px; + } +} diff --git a/config/_default/config.toml b/config/_default/config.toml index 90ecd20e4..843f8fe1a 100644 --- a/config/_default/config.toml +++ b/config/_default/config.toml @@ -3,6 +3,8 @@ baseURL = "https://kubesphere-v3.netlify.app" enableRobotsTXT = true [markup] + [markup.goldmark.extensions] + typographer = false [markup.tableOfContents] endLevel = 3 ordered = false @@ -10,6 +12,7 @@ enableRobotsTXT = true [markup.goldmark.renderer] unsafe= true + [Taxonomies] [params] @@ -21,8 +24,6 @@ githubBlobUrl = "https://github.com/kubesphere/website/blob/master/content" githubEditUrl = "https://github.com/kubesphere/website/edit/master/content" -mailchimpSubscribeUrl = "https://kubesphere.us10.list-manage.com/subscribe/post?u=c85ea2b944b08b951f607bdd4&id=83f673a2d9" - gcs_engine_id = "018068616810858123755%3Apb1pt8sx6ve" githubLink = "https://github.com/kubesphere/kubesphere" @@ -34,6 +35,7 @@ twitterLink = "https://twitter.com/KubeSphere" mediumLink = "https://itnext.io/@kubesphere" linkedinLink = "https://www.linkedin.com/company/kubesphere/" + [languages.en] contentDir = "content/en" weight = 1 @@ -45,6 +47,7 @@ title = "KubeSphere | The Kubernetes platform tailored for hybrid multicloud" description = "KubeSphere is a distributed operating system managing cloud native applications with Kubernetes as its kernel, and provides plug-and-play architecture for the seamless integration of third-party applications to boost its ecosystem." 
keywords = "KubeSphere, Kubernetes, container platform, DevOps, hybrid cloud, cloud native" snapshot = "/images/common/snapshot-en.png" +mailchimpSubscribeUrl = "https://kubesphere.us10.list-manage.com/subscribe/post?u=c85ea2b944b08b951f607bdd4&id=83f673a2d9" [[languages.en.menu.main]] weight = 2 @@ -105,33 +108,39 @@ hasChildren = true [[languages.en.menu.main]] parent = "Documentation" - name = "v3.1.x star" - URL = "docs/" + name = "v3.2.x star" + URL = "/docs" weight = 1 + [[languages.en.menu.main]] + parent = "Documentation" + name = "v3.1.x" + URL = "https://v3-1.docs.kubesphere.io/docs" + weight = 2 + [[languages.en.menu.main]] parent = "Documentation" name = "v3.0.0" URL = "https://v3-0.docs.kubesphere.io/docs" - weight = 2 + weight = 3 [[languages.en.menu.main]] parent = "Documentation" name = "v2.1.x" URL = "https://v2-1.docs.kubesphere.io/docs" - weight = 3 + weight = 4 [[languages.en.menu.main]] parent = "Documentation" name = "v2.0.x" URL = "https://v2-0.docs.kubesphere.io/docs/" - weight = 4 + weight = 5 [[languages.en.menu.main]] parent = "Documentation" name = "v1.0.0" URL = "https://v1-0.docs.kubesphere.io/docs/" - weight = 5 + weight = 6 [[languages.en.menu.main]] weight = 5 @@ -185,6 +194,7 @@ title = "KubeSphere | 面向云原生应用的容器混合云" description = "KubeSphere 是在 Kubernetes 之上构建的以应用为中心的多租户容器平台,提供全栈的 IT 自动化运维的能力,简化企业的 DevOps 工作流。KubeSphere 提供了运维友好的向导式操作界面,帮助企业快速构建一个强大和功能丰富的容器云平台。" keywords = "KubeSphere, Kubernetes, 容器平台, DevOps, 混合云" snapshot = "/images/common/snapshot-zh.png" +mailchimpSubscribeUrl = "https://yunify.us2.list-manage.com/subscribe/post?u=f29f08cef80223b46bad069b5&id=4838e610c2" [[languages.zh.menu.main]] weight = 2 @@ -244,33 +254,39 @@ hasChildren = true name = "文档中心" [[languages.zh.menu.main]] parent = "文档中心" - name = "v3.1.x star" - URL = "docs/" + name = "v3.2.x star" + URL = "/docs/" weight = 1 + [[languages.zh.menu.main]] + parent = "文档中心" + name = "v3.1.x" + URL = "https://v3-1.docs.kubesphere.io/zh/docs/" + weight = 2 + 
[[languages.zh.menu.main]] parent = "文档中心" name = "v3.0.0" URL = "https://v3-0.docs.kubesphere.io/zh/docs/" - weight = 2 + weight = 3 [[languages.zh.menu.main]] parent = "文档中心" name = "v2.1.x" URL = "https://v2-1.docs.kubesphere.io/docs/zh-CN/" - weight = 3 + weight = 4 [[languages.zh.menu.main]] parent = "文档中心" name = "v2.0.x" URL = "https://v2-0.docs.kubesphere.io/docs/zh-CN/" - weight = 4 + weight = 5 [[languages.zh.menu.main]] parent = "文档中心" name = "v1.0.0" URL = "https://v1-0.docs.kubesphere.io/docs/zh-CN/" - weight = 5 + weight = 6 [[languages.zh.menu.main]] weight = 5 diff --git a/config/upstream-zh/config.toml b/config/upstream-zh/config.toml index 955694d34..c256e51ec 100644 --- a/config/upstream-zh/config.toml +++ b/config/upstream-zh/config.toml @@ -4,5 +4,12 @@ defaultContentLanguage = "zh" [params] showCaseNumber = true +addBaiduAnalytics = true + bilibiliLink = "https://space.bilibili.com/438908638" -mailchimpSubscribeUrl = "https://yunify.us2.list-manage.com/subscribe/post?u=f29f08cef80223b46bad069b5&id=4838e610c2" \ No newline at end of file + +[languages.en.params] +mailchimpSubscribeUrl = "https://yunify.us2.list-manage.com/subscribe/post?u=f29f08cef80223b46bad069b5&id=4838e610c2" + +[languages.zh.params] +mailchimpSubscribeUrl = "https://yunify.us2.list-manage.com/subscribe/post?u=f29f08cef80223b46bad069b5&id=4838e610c2" diff --git a/content/en/_index.md b/content/en/_index.md index e4efe02d0..abc661b4a 100644 --- a/content/en/_index.md +++ b/content/en/_index.md @@ -90,7 +90,7 @@ section4: - name: Multiple Storage and Networking Solutions icon: /images/home/multi-tenant-management.svg - content: Support GlusterFS, CephRBD, NFS, LocalPV solutions, and provide CSI plugins to consume storage from multiple cloud providers. 
Provide a load balancer Porter for bare metal Kubernetes, and offers network policy management, support Calico and Flannel CNI + content: Support GlusterFS, CephRBD, NFS, LocalPV solutions, and provide CSI plugins to consume storage from multiple cloud providers. Provide a load balancer OpenELB for bare metal Kubernetes, and offers network policy management, support Calico and Flannel CNI features: diff --git a/content/en/blogs/Kubernetes-Backup-and-Restore-with-Kasten-K10 on-KubeSphere.md b/content/en/blogs/Kubernetes-Backup-and-Restore-with-Kasten-K10 on-KubeSphere.md index c17eb71eb..564cce62b 100644 --- a/content/en/blogs/Kubernetes-Backup-and-Restore-with-Kasten-K10 on-KubeSphere.md +++ b/content/en/blogs/Kubernetes-Backup-and-Restore-with-Kasten-K10 on-KubeSphere.md @@ -20,9 +20,9 @@ In this article, we will introduce the deployment of Kasten K10 on KubeSphere. ## Provision a KubeSphere Cluster -This article will introduce how to deploy Kasten on on KubeSphere Container Platform. You can install KubeSphere on any Kubernetes cluster or Linux system, refer to [KubeSphere documentation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/) for more details or vist the [Github]( https://github.com/kubesphere/website) of KubeSphere. +This article will introduce how to deploy Kasten on KubeSphere Container Platform. You can install KubeSphere on any Kubernetes cluster or Linux system, refer to [KubeSphere documentation](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/) for more details or visit the [GitHub]( https://github.com/kubesphere/website) of KubeSphere. -After the creation of KubeSphere cluster, you can log in to KubeSphere web console: +After the creation of KubeSphere cluster, you can log in to the KubeSphere web console: ![web console](/images/blogs/en/kastenk10image/kastenk10-step3.png) Click the button "Platform" in the upper left corner and then select "Access Control"; Create a new workspace called Kasten-Workspace. 
@@ -34,7 +34,7 @@ Enter "Kasten-workspace" and select "App Repositoties"; Add an application repos Add the official Helm Repository of Kasten to KubeSphere. **Helm repository address**[2]:`https://charts.kasten.io/` ![official helm repository](/images/blogs/en/kastenk10image/kastenk10-step6.png) -Once completed, the repository will find its status be "successful". +Once completed, the repository will find its status to be "successful". ![successful status](/images/blogs/en/kastenk10image/kastenk10-step7.png) ## Deploy Kasten K10 on Kubernetes to Backup and Restore Cluster @@ -76,7 +76,7 @@ global: create: "true" class: "nginx" ``` -Click "Deploy" and wait the status to turn into "running". +Click "Deploy" and wait for the status to turn into "running". ![apps](/images/blogs/en/kastenk10image/kastenk10-step13.png) Click "Deployment" to check if Kasten has deployed workload and is in running status. @@ -93,7 +93,7 @@ In “Application Workloads” - “Routes” page, we can find the Gateway of I Input `https://192.168.99.100/k10/#` to the browser for the following log-in interface; Input the company and e-mail address to sign up. ![login page](/images/blogs/en/kastenk10image/kastenk10-step16.png) -Set the locations for storing our backup data. In this case S3 compatible storage is selected. +Set the locations for storing our backup data. In this case, S3 compatible storage is selected. ![settings1](/images/blogs/en/kastenk10image/kastenk10-step17.png) @@ -105,7 +105,7 @@ Finally, start "K10 Disaster Recovery" and we can start to set "Disaster Recover ## Deploy Cloud Native Applications on Kubernetes -Kasten Dashboard holds 16 applications, which are shown as follows. We can create a Wordpress application with a Wordpress Pod and Mysql Pod, a typical application that is partly stateful and partly stateless. Here are the steps. +Kasten Dashboard holds 16 applications, which are shown as follows. 
We can create a WordPress application with a WordPress Pod and Mysql Pod, a typical application that is partly stateful and partly stateless. Here are the steps. ![kasten dashboard1](/images/blogs/en/kastenk10image/kastenk10-step19.png) @@ -149,7 +149,7 @@ In addition, applications of the WordPress can also be find in "Applications". ## Back Up Cloud Native Applications -Click "Create Policy" and create a data backup strategy. In such case, Kasten can protect applications by creating local snapshot, and back up the application data to cloud, thus to realize the long-term retention of data. +Click "Create Policy" and create a data backup strategy. In such a case, Kasten can protect applications by creating local snapshot, and back up the application data to cloud, thus to realize the long-term retention of data. ![create policy](/images/blogs/en/kastenk10image/kastenk10-step22.png) Click "Run Once" to start backup. @@ -183,7 +183,7 @@ In KubeSphere Dashboard, we can find these applications recovered are running. ## Summary -As a container platform, KubeSphere excels in cloud native application deployment. For application developers who are not familiar with Kubernetes and hope to make simple configuration to deploy Kasten, it is easy to follow the above steps and deploy Kasten with KubeSphere. KubeSphere helps to directly deploy the official Helm repository of Kasten K10, which performs well in data management, including backup, migration and disaster recovery. +As a container platform, KubeSphere excels in cloud native application deployment. For application developers who are not familiar with Kubernetes and hope to make simple configurations to deploy Kasten, it is easy to follow the above steps and deploy Kasten with KubeSphere. KubeSphere helps to directly deploy the official Helm repository of Kasten K10, which performs well in data management, including backup, migration and disaster recovery. 
### Reference diff --git a/content/en/blogs/Kubernetes-multicluster-KubeSphere.md b/content/en/blogs/Kubernetes-multicluster-KubeSphere.md new file mode 100644 index 000000000..e2cf94991 --- /dev/null +++ b/content/en/blogs/Kubernetes-multicluster-KubeSphere.md @@ -0,0 +1,147 @@ +--- +title: 'Kubernetes Multi-cluster Management and Application Deployment in Hybrid Cloud' +tag: 'KubeSphere, Kubernetes, Multi-cluster Management' +keywords: 'KubeSphere, Kubernetes, Multi-cluster Management, KubeFed' +description: 'This post introduces Kubernetes multi-cluster management and shares how KubeSphere distributes and deploys applications in a unified manner using KubeFed in hybrid cloud.' +createTime: '2021-12-26' +author: 'Li Yu, Bettygogo' +snapshot: '/images/blogs/en/Kubernetes-multicluster-KubeSphere/00-federation-control-plane.png' +--- + +> This post introduces the development of Kubernetes multi-cluster management and existing multi-cluster solutions. It also shares how KubeSphere distributes and deploys applications in a unified manner using KubeFed in hybrid cloud for the purpose of achieving cross-region high availability and disaster recovery. Finally, it discusses the possibility of decentralized multi-cluster architecture. + +Before initiating KubeSphere v3.0, we made a survey in the community and found that most of the users called for multi-cluster management and application deployment in different cloud environments. To meet users' needs, we added the multi-cluster management feature in KubeSphere v3.0. + +## Kubernetes Architecture in a Single Cluster + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubeadm-HA.png) + +Kubernetes consists of the master and worker nodes. On the master node, the API server processes API requests, Controller Manager takes charge of starting multiple controllers and consistently coordinating the transition of declarative APIs from spec to status, Scheduler is used to schedule Pods, and etcd stores data of clusters. 
The worker nodes are mainly responsible for starting Pods. + +Enterprises have the following expectations, which cannot be met by a single cluster: + +- Physical isolation: Despite the fact that Kubernetes supports isolation by namespace, and you can set the CPU and memory usage of each namespace, and also use the network policy to configure network connectivity among namespaces, enterprises still need a completely isolated physical environment to make sure that services are independent from each other. + +- Hybrid cloud: To reduce the cost, enterprises expect a package of public cloud providers and private cloud solutions to prevent vendor lock-in. + +- Multi-site high availability for applications: To make sure that applications still work properly even though an electricity power outage occurs in a region, enterprises expect to deploy multiple replicas in clusters in different regions. + +- Independent development, test, and production environment: Enterprises want to separately deploy the development, test, and production environments in different clusters. + +- Scalability: A single cluster has a limited number of nodes, while multiple clusters are more scalable. + +The most common practice is to manage different clusters using multiple Kubeconfig files, and the frontend makes multiple API calls to simultaneously deploy services. However, KubeSphere manages clusters in a more cloud native way. + +We researched existing solutions, which mainly focus on the following: + +- Resource distribution on the control plane, such as Federation v1 and Federation v2 launched by the Kubernetes community and Argo CD/Flux CD (distributed application pipelines). + +- Network connectivity between Pods in different clusters, such as Cilium Mesh, Istio Multi-Cluster, and Linkerd Service Mirroring. As these projects are bound to specific CNI and service governance components, I'll only detail Federation v1 and Federation v2 in the following sections. 
+ +## Federation v1 + +![](https://pek3b.qingstor.com/kubesphere-community/images/Federation-v1.png) + +In the architecture of Federation v1, we can find that more than one API server (developed based on Kube-Apiserver) and Controller Manager (similar to Kube-Controller-Manager) exist. The master node is responsible for creating resource distribution tasks and distributing the resources to the worker nodes. + +![](https://pek3b.qingstor.com/kubesphere-community/images/Replicaset.png) + +The previous figure shows configurations of creating ReplicaSets in Federation v1, and it can be seen that there are more annotations, which store logics of distributed resources. Federation v1 has the following drawbacks: + +- It introduces independently developed API servers, requiring extra maintenance. + +- In Kubernetes, an API is defined by Group/Version/Kind (GVK). Federation v1 only supports specific native Kubernetes APIs and GVKs, resulting in poor compatibility among clusters with different API versions. + +- Federation v1 does not support role-based access control (RBAC), making it unable to provide cross-cluster permission control. + +- Annotations-based resource distribution makes APIs too cumbersome. + +## Federation v2 + +The Kubernetes community developed Federation v2 (KubeFed) on the basis of Federation v1. KubeFed adopts the CRD + Controller solution, which does not introduce extra API server and does not break into native Kubernetes APIs. + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubefed.png) + +In the architecture of KubeFed, we can find that a custom resource definition (CRD) consists of Template, Override, and Placement. With Type Configuration, it supports APIs with different versions, which improves cluster compatibility. Moreover, it supports federation of all resources, including CRDs, service discovery, and scheduling. + +The following exemplifies federated resources.
Deployment in KubeSphere corresponds to FederatedDeployment in KubeFed. `template` in `spec` refers to the original Deployment resource, and `placement` refers to clusters where the federated resources need to be placed. In `overrides`, you can set parameters for different clusters, for example, you can set the image tag of each deployment and replicas in each cluster. + +![](https://pek3b.qingstor.com/kubesphere-community/images/联邦资源.png) + +However, KubeFed also has the following limitations: + +- Its APIs are complex and error-prone. + +- No independent SDKs are provided, and binding and unbinding clusters rely on kubefedctl. + +- It requires the network connectivity between the control plane cluster and the managed clusters, which means that APIs must be reconstructed in a multi-cluster scenario. + +- The earlier versions cannot collect status information about federated resources. + +## KubeSphere on KubeFed + +Next, I'll show you how KubeSphere implements and simplifies multi-cluster management on the basis of KubeFed. + +![00-federation-control-plane](/images/blogs/en/Kubernetes-multicluster-KubeSphere/00-federation-control-plane.png) + +In the previous figure, the host cluster refers to the cluster with KubeFed installed, and it acts as the control plane; and the member cluster refers to the managed cluster. The host and member clusters are federated. + +![01-cluster-management](/images/blogs/en/Kubernetes-multicluster-KubeSphere/01-cluster-management.png) + +It can be seen that users can manage multiple clusters in a unified manner. KubeSphere defines a Cluster Object, which extends Cluster Objects of KubeFed, for example, the region zone provider. + +![02-add-cluster](/images/blogs/en/Kubernetes-multicluster-KubeSphere/02-add-cluster.png) + +KubeSphere allows you to import clusters in the following ways: + +- Direct connection + +In this case, the network between the host cluster and member clusters must be accessible. 
All you have to do is to use a Kubeconfig file to add the target clusters without using the complex kubefedctl. + +- Agent connection + +If the network between the host cluster and member clusters is not accessible, KubeFed cannot support federation. Based on Chisel, KubeSphere makes Tower open source, so that users only need to create an agent to federate clusters on private cloud. + +![](https://pek3b.qingstor.com/kubesphere-community/images/Tower工作流程.png) + +The workflow of Tower is as follows: (1) After you create an agent in a member cluster, the member cluster will connect to the Tower server of the host cluster; (2) The Tower server then listens to the port previously assigned by Controller and establishes a tunnel to distribute resources from the host cluster to the member cluster. + +### Support Multi-tenant in Multi-cluster Scenarios + +![multi-tenant-support](/images/blogs/en/Kubernetes-multicluster-KubeSphere/multi-tenant-support.png) + +In KubeSphere, a tenant is a workspace, and CRDs are used to implement authorization and authentication of tenants. To make KubeFed less dependent on the control plane, KubeSphere delegates CRDs through the federation layer. After the host cluster receives an API request, it directly forwards the request to member clusters. Even if the host cluster fails, the original tenant information is stored on the member clusters, and users can still log in to the console of the member clusters to deploy their services. + +### Deploy Applications in Multi-cluster Scenarios + +![create-stateless-service-png](/images/blogs/en/Kubernetes-multicluster-KubeSphere/create-stateless-service-png.png) + +It is complex and error-prone if we manually define KubeFed APIs. When we deploy applications on KubeSphere, we can directly select the cluster where the application is to be deployed and specify replicas, and configure image address and environment variables of different clusters in **Cluster Differences**.
For example, if cluster A cannot pull the gcr.io image, you can use the DockerHub address. + +### Collect Status Information About Federated Resources + +![view-status](/images/blogs/en/Kubernetes-multicluster-KubeSphere/view-status.png) + +As we mentioned before, KubeFed cannot collect status information about federated resources. But don't worry, KubeSphere is always ready to help you. With our self-developed status collection tool, you can easily locate the event information and troubleshoot the failure, for example, when Pod creation fails. Moreover, KubeSphere can also monitor federated resources, which enhances observability. + +### Planned Improvements of KubeSphere + +Although KubeSphere simplifies federation among clusters on the basis of KubeFed, it also needs improvements. + +- On the centralized control plane, resources can only be distributed using the push strategy, which requires that the host cluster must be highly available. Kubefed community is exploring a new possibility, that is, pulling resources from the member cluster to the host cluster. + +- KubeSphere is an open community, and we hope that more users can join us. However, multi-cluster development needs developers to define a series of Types CRDs, which is not developer-friendly. + +- No ideal service discovery solutions are available in multi-cluster scenarios. + +- Currently, KubeSphere does not support Pod replica scheduling in multi-cluster scenarios. In the next version, we plan to introduce Replica Scheduling Preference. + +If you ask me whether it is possible to avoid introducing a centralized control plane and reducing the number of APIs in a multi-cluster scenario, my answer is definitely Liqo. But before we dig into Liqo, I'd like to introduce Virtual Kubelet first. 
+ +![](https://pek3b.qingstor.com/kubesphere-community/images/Kubernetes-API.png) + +Virtual Kubelet allows you to simulate a Kubelet in your service as a Kubernetes node to join a Kubernetes cluster, making Kubernetes clusters more scalable. + +![](https://pek3b.qingstor.com/kubesphere-community/images/API-transparency.png) + +In Liqo, clusters are not federated. In the figure on the left, K2 and K3 clusters are the member clusters of K1 under the Kubefed architecture, and the resources distribution needs to be pushed by K1. In the figure on the right, K2 and K3 are just a node of K1. In this case, when we deploy applications, we don't need to introduce any API, K2 and K3 seem to be nodes of K1, and the services can be smoothly deployed to different clusters, which greatly reduces the complexity of transforming from a single cluster to multiple clusters. However, Liqo is still at its early stage and currently does not support topologies with more than two clusters. KubeSphere will continuously follow other open-source multi-cluster management solutions to better satisfy your needs. \ No newline at end of file diff --git a/content/en/blogs/TiDB-on-KubeSphere-upload-helm-chart.md b/content/en/blogs/TiDB-on-KubeSphere-upload-helm-chart.md index 2372eb5c6..d3f411006 100644 --- a/content/en/blogs/TiDB-on-KubeSphere-upload-helm-chart.md +++ b/content/en/blogs/TiDB-on-KubeSphere-upload-helm-chart.md @@ -115,7 +115,7 @@ You can release apps you have uploaded to KubeSphere to the public repository, a ![app-template-list](https://ap3.qingstor.com/kubesphere-website/docs/20201201150748.png) -4. On the detail page, click the version number to expand the menu where you can delete the version, deploy the app to test it, or submit it for review. KubeSphere allows you to manage an app across its entire lifecycle. 
For an enterprise, this is very useful when different tenants need to be isolated from each other and are only responsible for their own part as they manage an app version. For demonstration purposes, I will use the account `admin` to perform all the operations. As we do not need to test the app, click **Submit Review** directly. +4. On the detail page, click the version number to expand the menu where you can delete the version, deploy the app to test it, or submit it for review. KubeSphere allows you to manage an app across its entire lifecycle. For an enterprise, this is very useful when different tenants need to be isolated from each other and are only responsible for their own part as they manage an app version. For demonstration purposes, I will use the user `admin` to perform all the operations. As we do not need to test the app, click **Submit Review** directly. ![detail-page](https://ap3.qingstor.com/kubesphere-website/docs/20201201150948.png) diff --git a/content/en/blogs/TiDB-on-KubeSphere-using-qke.md b/content/en/blogs/TiDB-on-KubeSphere-using-qke.md index 3b94ccb72..3cd0d57e9 100644 --- a/content/en/blogs/TiDB-on-KubeSphere-using-qke.md +++ b/content/en/blogs/TiDB-on-KubeSphere-using-qke.md @@ -60,7 +60,7 @@ Therefore, I select QingCloud Kubernetes Engine (QKE) to prepare the environment 5. Now, let's get back to the **Access Control** page where all the workspaces are listed. Before I proceed, first I need to create a new workspace (e.g. `dev-workspace`). - In a workspace, different users have different permissions to perform varied tasks in projects. Usually, a department-wide project requires a multi-tenant system so that everyone is responsible for their own part. For demonstration purposes, I use the account `admin` in this example. You can [see the official documentation of KubeSphere](https://kubesphere.io/docs/quick-start/create-workspace-and-project/) to know more about how the multi-tenant system works. 
+ In a workspace, different users have different permissions to perform varied tasks in projects. Usually, a department-wide project requires a multi-tenant system so that everyone is responsible for their own part. For demonstration purposes, I use the user `admin` in this example. You can [see the official documentation of KubeSphere](https://kubesphere.io/docs/quick-start/create-workspace-and-project/) to know more about how the multi-tenant system works. ![create-workspace](https://ap3.qingstor.com/kubesphere-website/docs/20201026192648.png) diff --git a/content/en/blogs/apache-log4j2-vulnerability-solution.md b/content/en/blogs/apache-log4j2-vulnerability-solution.md new file mode 100644 index 000000000..e00991c76 --- /dev/null +++ b/content/en/blogs/apache-log4j2-vulnerability-solution.md @@ -0,0 +1,81 @@ +--- +title: 'KubeSphere Recommendations for Responding to Apache Log4j 2 Vulnerabilities' +tag: 'CVE vulnerability' +keywords: 'Elasticsearch, Apache Log4j, security vulnerability, KubeSphere' +description: 'Apache Log4j 2 is an open-source logging tool that is used in a wide range of frameworks. Recently, Apache Log4j 2 vulnerabilities have been reported. This article provides KubeSphere users with recommendations for fixing the vulnerabilities.' +createTime: '2021-12-21' +author: 'KubeSphere Team' +snapshot: '../../../images/blogs/log4j/log4j.jpeg' +--- + +Apache Log4j 2 is an open-source logging tool that is used in a wide range of frameworks. Recently, Apache Log4j 2 vulnerabilities have been reported. This article provides KubeSphere users with recommendations for fixing the vulnerabilities. + +In Log4j 2, the lookup functionality allows developers to read specific environment configurations by using some protocols. However, it does not scrutinize the input during implementation, and this is where the vulnerabilities come in. 
A large number of Java-based applications have been affected, including Apache Solr, spring-boot-starter-log4j2, Apache Struts2, ElasticSearch, Dubbo, Redis, Logstash, Kafka, and so on. For more information, see [Log4j 2 Documentation](https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/usages?p=1). + +Apache Log4j versions 2.x to 2.15.0-rc2 are affected. Currently, Apache has released Apache 2.15.0-rc2 to fix the vulnerabilities. However, this release is not stable. If you plan to upgrade to Apache 2.15.0-rc2, we recommend that you back up your data first. + +The KubeSphere team provides the following three workarounds to fix the vulnerabilities. + +- Set the value of environment variable `FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS` to `true`. +- Add `log4j2.formatMsgNoLookups=True` to the configmap file. +- Set the `-Dlog4j2.formatMsgNoLookups=true` JVM option. + +## Workaround 1: Change the value of the environment variable + +KubeSphere uses Elasticsearch to collect logs by default, so it's necessary to fix the vulnerabilities on KubeSphere. The following describes how to fix Elasticsearch. + +Run the following commands to edit the Elasticsearch YAML files. + +```yaml +kubectl edit statefulset elasticsearch-logging-data -n kubesphere-logging-system +kubectl edit statefulset elasticsearch-logging-discovery -n kubesphere-logging-system +``` + +Set the value of `FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS` to `true`. + +```yaml +env: +- name: FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS + value: "true" +``` + +## Workaround 2: Change Log4j 2 configurations + +Run the following command to edit the configmap file. + +```yaml +kubectl edit configmaps elasticsearch-logging -n kubesphere-logging-system +``` + +Add `log4j2.formatMsgNoLookups=True` to the `log4j2.properties` section.
+ +```yaml +log4j2.properties: |- + status=error + appender.console.type=Console + appender.console.name=console + appender.console.layout.type=PatternLayout + appender.console.layout.pattern=[%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + rootLogger.level=info + rootLogger.appenderRef.console.ref=console + logger.searchguard.name=com.floragunn + logger.searchguard.level=info + # Add the parameter here. + log4j2.formatMsgNoLookups=true +``` + + + +> Note: +> +> 1. After you add the parameter, check whether it has been mounted successfully. If not, restart the pod. +> +> 2. If you have re-installed the KubeSphere logging component, configmap configurations may be reset. In this case, add the parameter again according to Workaround 2, or you can use Workaround 1. + +## Workaround 3: Change the JVM parameter of Elasticsearch + +You can also set the JVM option `-Dlog4j2.formatMsgNoLookups=true`. For more information, see the [Elasticsearch announcement](https://discuss.elastic.co/t/apache-log4j2-remote-code-execution-rce-vulnerability-cve-2021-44228-esa-2021-31/291476). + +## Reference + +Artifacts using Apache Log4j Core: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/usages?p=1 \ No newline at end of file diff --git a/content/en/blogs/aws-kubernetes.md b/content/en/blogs/aws-kubernetes.md index 38120fe19..e3147e3cc 100644 --- a/content/en/blogs/aws-kubernetes.md +++ b/content/en/blogs/aws-kubernetes.md @@ -1,5 +1,5 @@ --- -title: 'How to Deploy an HA Kubernetes Cluster on AWS | KubeSphere KubeKey' +title: 'How to Deploy Kubernetes on AWS' tag: 'Kubernetes, HA, High Availability, AWS, KubeKey' keywords: 'Kubernetes, HA, High Availability, AWS, KubeKey, KubeSphere' description: 'The KubeKey tool can be used to quickly and efficiently deploy an HA Kubernetes cluster. This article demonstrates how to deploy an HA Kubernetes cluster on AWS.' 
@@ -17,7 +17,7 @@ To meet the HA service requirements of Kubernetes in AWS, we need to ensure the This article uses the AWS ELB service as an example. -## Prerequisites +## Prerequisites for Deployment on AWS - You need to create a storage system based on NFS, GlusterFS, or Ceph. In consideration of data persistence, we do not recommend OpenEBS for production environments. This article uses OpenEBS to configure LocalPV as the default storage service only for testing. - All nodes can be accessed over SSH. @@ -136,7 +136,7 @@ sudo systemctl restart sshd Download KubeKey from the [Github Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command: ``` -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.0 sh - ``` ## Use KubeKey to Deploy a Kubernetes Cluster @@ -252,4 +252,4 @@ Run the following commands to check the deployment result: kubernetes 192.168.0.10:6443,192.168.0.11:6443,192.168.0.12:6443 5m10s ``` - \ No newline at end of file + diff --git a/content/en/blogs/dockershim-out-of-kubernetes.md b/content/en/blogs/dockershim-out-of-kubernetes.md index c1353b66e..f61bca3d4 100644 --- a/content/en/blogs/dockershim-out-of-kubernetes.md +++ b/content/en/blogs/dockershim-out-of-kubernetes.md @@ -93,6 +93,8 @@ As KubeSphere supports any implementation of the Kubernetes CRI, you can easily ```bash systemctl enable containerd && systemctl restart containerd ``` + +> If `containerd config dump |grep sandbox_image` still shows `k8s.gcr.io/pause:xxx`, please add `version = 2` to the beginning of `/etc/containerd/config.toml` and run `systemctl restart containerd`. 4. Install crictl. 
diff --git a/content/en/blogs/how-to-use-kubesphere-project-gateways-and-routes.md b/content/en/blogs/how-to-use-kubesphere-project-gateways-and-routes.md new file mode 100644 index 000000000..7c8ae327d --- /dev/null +++ b/content/en/blogs/how-to-use-kubesphere-project-gateways-and-routes.md @@ -0,0 +1,109 @@ +--- +title: 'How to Use KubeSphere Project Gateways and Routes' +tag: 'KubeSphere, Kubernetes' +keywords: 'KubeSphere, Kubernetes, Gateway, Spring Cloud' +description: 'This article introduces the architecture of Routes, compares Routes with Kubernetes Services and other gateways, and uses SockShop as an example to demonstrate how to configure Routes.' +createTime: '2021-11-15' +author: 'Roland Ma, Patrick Luo' +snapshot: '/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/snapshot.png' +--- + +KubeSphere project gateways and Routes provide a method for aggregating Services, which allows you to expose multiple Services by using a single IP address in HTTP or HTTPS mode. You can configure routing rules by using a domain name and multiple paths in a Route. The routing rules map different paths to different Services. You can also configure options such as HTTPS offloading in a Route. Project gateways forward external requests to Services according to routing rules configured in Routes. + +## Overall Architecture + +Project gateways are used to aggregate Services. Therefore, we can understand the project gateway architecture from the perspective of Services. The following figure shows the architecture of a project gateway in a typical production environment. + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370451-193428-kubernetes-ingress.png) + +The architecture contains four parts: + +* Nginx Ingress Controller, which is the core component of the project gateway architecture. Nginx Ingress Controller functions as a reverse proxy and obtains reverse proxy rules (routing rules) from Routes. 
A Route in KubeSphere is the same as an Ingress in Kubernetes. A project gateway is in effect an Nginx reverse proxy exposed by using a Service. In a production environment, the Service is usually a LoadBalancer Service, which uses a public IP address and an external load balancer provided by a cloud vendor to ensure high availability. +* External load balancer, which is generated according to the Service settings and is usually provided by a cloud vendor. Features such as SLA, bandwidth, and IP configuration of different load balancers may vary. You can usually use annotations to configure the load balancer. Different cloud vendors may support different annotations. +* Domain name resolution service, which is usually provided by a DNS provider. You can configure DNS records to map a domain name to the public IP address of the load balancer. If the IP address is also used by subdomain names, you can also use wildcard characters to map multiple subdomain names to the same IP address. +* Services and Routes. You need to create Services to expose applications, and create Routes to aggregate multiple Services. Note that Nginx Ingress Controller does not use kube-proxy to forward traffic to Services. It obtains Endpoints corresponding to Pods from Services and set them as upstream targets of Nginx. Therefore, Nginx is directly connected to Pods, which avoids extra network overheads caused by Services. + +### Compare Routes with LoadBalancer Services + +In practice, people might be confused about the application scenarios of Routes and Services. Both of them are used to expose applications to outside the Kubernetes cluster and provide load balancing. In addition, Routes seem to depend on Services. So what are their differences? We can discuss this issue from the following perspectives: + +* Services are originally designed to abstract application back-ends (Pods) for access over the network. 
All back-ends of an application are the same and are exposed using the same Service. By contrast, Routes are designed to manage API objects. Although a Route can also be used to expose a single Service, its more powerful feature is that it can aggregate multiple Services and provide a unified IP address and domain name for external access. +* Services work at layer 4 of the OSI model and use combinations of IP addresses, ports, and protocols as unique identifiers. Therefore, IP addresses of different Services on the same network cannot be the same. For example, HTTP/HTTPS-based Services typically use ports 80 and 443. When using these Services to expose applications, you need to assign different IP addresses to different Services to avoid port conflicts, which is a waste of resources. Routes work at layer 7 of the OSI model, and all Services exposed by using Routes can share the IP address and ports 80 and 443 of the same project gateway. Each Route uses a domain name and multiple paths as unique identifiers of different Services. The project gateway forwards HTTP requests to different Services based on the domain name and paths configured in Routes. +* Services support both TCP and UDP and do not restrict upper-layer protocols, while Routes support only HTTP, HTTPS and HTTP2 and cannot forward TCP-based or UDP-based requests. + +From the preceding analysis, we can draw a conclusion that Routes are ideal for HTTP-based microservice architectures while Services support more protocols, though Services are not the best choice for HTTP-based applications. + +### Compare Routes with Spring Cloud Gateway and Ocelot + +Java and .NET Core developers must be familiar with Spring Cloud Gateway and Ocelot, which are most frequently used API gateways in Java and .NET Core respectively. So can we use these gateways directly instead of Routes and Services? 
To discuss this issue, we need to first have a basic understanding of API gateways: + +> An API gateway is the sole entrance for clients to access back-end services. It functions as a reverse proxy for aggregating back-end services, routes client requests to back-end services, and returns service responses to clients. An API gateway also provides advanced features such as authentication, monitoring, load balancing, and HTTPS offloading. + +Therefore, Routes and API gateways such as Spring Cloud Gateway and Ocelot provide similar functions. For example, you can use a Service to expose Spring Cloud Gateway to outside the cluster to achieve certain features of a Route. The following briefly analyzes their pros and cons: + +* As application gateways, all of them can be used to forward traffic. In addition, all of them support routing rules based on domain names and paths. +* In terms of service registration and discovery, all-in-one solutions such as Spring Cloud Gateway provide rich features and are more friendly to Java developers. Services can be seamlessly integrated by using a registration center. Ocelot does not provide a built-in service discovery and registration scheme, but you can achieve this feature by using both Ocelot and Consul. Applications deployed in a Kubernetes cluster typically use DNS-based service discovery, but no unified service registration and discovery scheme is available for clients. You need to explicitly define routing rules in a Route to expose Services. By contrast, Spring Cloud Gateway fits well into the technology stack of the development language, which makes learning much easier for developers. +* In terms of universality, Routes (Ingresses) are the cloud-native API management standard defined by the Kubernetes community. KubeSphere uses Nginx Ingress Controller to implement the functionality of Routes by default. Meanwhile, KubeSphere is also compatible with other Ingress controllers. 
Routes provide only common features, while project gateways provide more operations and maintenance (O\&M) tools such as logging, monitoring, and security. By contrast, API gateways are tightly coupled with programming languages and development platforms. Usually API gateways cannot be used across different languages without the introduction of more technology stacks or client support. API gateways usually provide relatively stable features and support rich interfaces for plugins, allowing developers to extend the features by using languages they are familiar with. +* In terms of performance, Routes based on Nginx Ingress Controller evidently outperform Spring Cloud Gateway and Ocelot. + +Overall, each type of gateway has its own advantages and disadvantages. In the initial phase of a project, the gateway architecture should be considered. In cloud-native scenarios, Routes are an ideal choice. If your team depends on a specific technology stack, the API gateway of the technology stack is preferred. However, this does not necessarily mean you can only use one type of gateway. In some complex scenarios, you can use different types of gateways to utilize their advantages. For example, developers can use API gateways that they are familiar with to implement features such as service aggregation and authentication, and use Routes to expose these API gateways to implement features such as logging, monitoring, load balancing, and HTTPS offloading. For instance, Microsoft's microservice architecture demo [eShopOnContainers](https://docs.microsoft.com/en-us/dotnet/architecture/cloud-native/introduce-eshoponcontainers-reference-app "eShopOnContainers") uses this hybrid architecture. + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370654-571190-eshoponcontainers-architecture-aggregator-services.png) + +## Hands-on Practice + +So far we have discussed the application scenarios and overall architecture of Routes.
In the following we will demonstrate how to configure a project gateway and Route in KubeSphere. The following uses SockShop as an example, which is a microservice demo project of Weaveworks. SockShop uses an architecture where the front-end and back-end are separated. It consists of the `front-end` Service and back-end Services such as `catalogue`, `carts`, and `orders`. In the architecture, the `front-end` Service not only provides static pages, but also functions as a proxy that forwards traffic to back-end APIs. Assume that asynchronous service blocking occurs when Node.js forwards traffic to APIs, which deteriorates page performance. To address this problem, we can use Routes to directly forward traffic to the `catalogue` Service. The following describes the configuration procedure. + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370560-468146-socksshop.png) + +### Preparations + +* Before deploying SockShop, you need to create a workspace and a project (for example, `workspace-demo` and `sock-shop`). For details, please refer to [Create Workspaces, Projects, Users, and Roles](https://kubesphere.com.cn/docs/quick-start/create-workspace-and-project/ "Create Workspaces, Projects, Users, and Roles").[](https://kubesphere.io/docs/quick-start/create-workspace-and-project/) + +* After the `sock-shop` project is created, you need to use kubectl to deploy Services related to SockShop. You can use your local CLI console or kubectl provided by the KubeSphere toolbox to run the following command. + + ``` + kubectl -n sock-shop apply -f https://github.com/microservices-demo/microservices-demo/raw/master/deploy/kubernetes/complete-demo.yaml + ``` + +After the preceding preparations are complete, go to the **Workloads** page of the `sock-shop` project to check the workload status, and wait until all Deployments are running properly before proceeding to the next step. 
+ +![deployment-list](/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/deployment-list.png) + +### Enable the Project Gateway + +1. Go to the `sock-shop` project, select **Project Settings** > **Advanced Settings** on the left navigation pane, and click **Enable Gateway**. + +2. In the displayed dialog box, set parameters based on the KubeSphere installation environment. If you are using a local development environment or a private environment, you can set the gateway access mode to `NodePort`. If you are using a managed Kubernetes environment, you can set the gateway access mode to `LoadBalancer` for high availability. + +### Create a Route + +1. In the left navigation pane, select **Application Workloads** > **Routes**, and click **Create** on the right. On the **Basic Information** tab, set **Name** to `front-end`. On the **Routing Rules** tab, add a routing rule. This example uses the **Auto Generate** mode. The system will automatically generate a domain name in the `...nip.io` format, and the domain name will be resolved by nip.io into the gateway IP address. Set the path, Service, and port to `/`, `front-end`, and `80` respectively. Click **Next**, and then click **Create**. + + ![deployment-list](/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/create-route.png) + +2. After the Route is created, click `front-end` in the Route list to view its details. On the **Resource Status** tab, click **Access Service**. If the Route functions properly, the following web page will be displayed. + + ![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371245-961841-sockshop.png) + +3. Open the debugging console of your web browser (for example, press **F12** for Chrome) to check the network requests of the SockShop web page. The following figure shows an API request sent to `catalogue`. 
+ + ![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371262-490907-f12.png) + + `X-Powered-By: Express` in `Response Headers` shows that the request is forwarded by the `front-end` Node.js application. + +4. On the details page of `front-end`, select **More** > **Edit Routing Rules**. In the displayed **Edit Routing Rules** dialog box, select the routing rule created in step 1, and click the edit icon on the right. Click **Add** to add a new path, and set the path, Service, and port to `/catalogue`, `catalogue`, and `80` respectively. Click **Save** to save the settings. The following figure shows the edited rule. + + ![deployment-list](/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/routing-rules.png) + +5. Refresh the SockShop web page (the page shows no changes) and check the network requests in the debugging console, as shown in the following figure. + + ![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371313-315498-f12-after.png) + + `X-Powered-By: Express` does not exist in `Response Headers`, which means that the API request is directly sent to the `catalogue` Service according to the new routing rule without being forwarded by the `front-end` Service. In this example, two routing rules are configured in the Route. The `/catalogue` routing rule is preferred to the `/` routing rule because the path of the former is longer and therefore more accurate. + + For more information about the Route settings, please refer to [Routes](https://kubesphere.io/docs/project-user-guide/application-workloads/routes/ "Routes"). + +## Summary + +This article briefly introduces the architecture of Routes, and compares Routes with Kubernetes Services and other application gateways. The SockShop example shows how to configure a project gateway and a Route. We would be delighted if this article can help you better understand Routes and choose the most appropriate method to expose applications based on their characteristics. 
\ No newline at end of file diff --git a/content/en/blogs/install-kubernetes-containerd-multus.md b/content/en/blogs/install-kubernetes-containerd-multus.md new file mode 100644 index 000000000..23a3ab52c --- /dev/null +++ b/content/en/blogs/install-kubernetes-containerd-multus.md @@ -0,0 +1,259 @@ +--- +title: 'Install Kubernetes 1.23, containerd, and Multus CNI the Easy Way' +tag: 'Kubernetes, KubeKey' +keywords: 'Kubernetes, containerd, docker, Multus CNI, ' +description: 'Install Kubernetes 1.23, containerd, and Multus CNI in a Linux machine within minutes.' +createTime: '2021-12-26' +author: 'Feynman' +snapshot: '/images/blogs/en/kubekey-containerd/kubernetes-containerd-banner.png' +--- + +![k8s-containerd](/images/blogs/en/kubekey-containerd/kubernetes-containerd-banner.png) + +[KubeKey](https://github.com/kubesphere/kubekey) is a lightweight and turn-key installer that supports the installation of Kubernetes, KubeSphere and related add-ons. Writtent in Go, KubeKey enables you to set up a Kubernetes cluster within minutes. + +Kubernetes 1.23 [was released on Dec 7](https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/). KubeKey has supported the installation of the latest version Kubernetes in its v2.0.0 alpha release, and also brought some new features such as support for Multus CNI, Feature Gates, and easy-to-use air-gapped installation, etc. + +This blog will demonstrate how to install Kubernetes 1.23.0, [containerd](https://containerd.io/), and [Multus CNI](https://github.com/k8snetworkplumbingwg/multus-cni) the easy way using KubeKey. + +## Step 1: Prepare a Linux Machine + +You need to prepare one or more hosts according to the following requirements for hardware and operating system. This blog uses a Linux server to start the all-in-one installation. + +### Hardware Recommendations + + + + + + + + + + + + + + + + + + + + + + + + + +
OSMinimum Requirements
Ubuntu 16.04, 18.042 CPU cores, 2 GB memory, and 40 GB disk space
Debian Buster, Stretch2 CPU cores, 2 GB memory, and 40 GB disk space
CentOS 7.x2 CPU cores, 2 GB memory, and 40 GB disk space
Red Hat Enterprise Linux 72 CPU cores, 2 GB memory, and 40 GB disk space
SUSE Linux Enterprise Server 15/openSUSE Leap 15.22 CPU cores, 2 GB memory, and 40 GB disk space
+ +### Node requirements + +- The node can be accessed through `SSH`. +- `sudo`/`curl`/`openssl` should be used. + +### Dependency requirements + +The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the following list to see if you need to install relevant dependencies on your node in advance. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DependencyKubernetes Version ≥ 1.18Kubernetes Version < 1.18
socatRequiredOptional but recommended
conntrackRequiredOptional but recommended
ebtablesOptional but recommendedOptional but recommended
ipsetOptional but recommendedOptional but recommended
+ +In case you use a CentOS 7.7 server, you could install socat and conntrack using the following commands: + +``` +yum install socat +yum install conntrack-tools +``` + +### Network and DNS requirements + +- Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause some issues of DNS in the cluster. +- If your network configuration uses firewall rules or security groups, you must ensure infrastructure components can communicate with each other through specific ports. It is recommended that you turn off the firewall. For more information, see [Port Requirements](../../docs/installing-on-linux/introduction/port-firewall/). +- Supported CNI plugins: Calico, Flannel, Cilium, Kube-OVN, and Multus CNI + +## Step 2: Download KubeKey + +Perform the following steps to download KubeKey. + +{{< tabs >}} + +{{< tab "Good network connections to GitHub/Googleapis" >}} + +Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command: + +```bash +curl -L https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.4/kubekey-v2.0.0-alpha.4-linux-amd64.tar.gz > installer.tar.gz && tar -zxf installer.tar.gz +``` + +{{}} + +{{< tab "Poor network connections to GitHub/Googleapis" >}} + +Run the following command first to make sure you download KubeKey from the correct zone. + +```bash +export KKZONE=cn +``` + +Run the following command to download KubeKey: + +```bash +curl -L https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.4/kubekey-v2.0.0-alpha.4-linux-amd64.tar.gz > installer.tar.gz && tar -zxf installer.tar.gz +``` + +{{< notice note >}} + +After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the following steps.
+ +{{}} + +{{}} + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +## Step 3: Enable the Multus CNI installation (Optional) + +If you want to customize the installation, for example, enable the Multus CNI installation, you can create an example configuration file with default configurations. +Otherwise, you can skip this step. + +``` +./kk create config --with-kubernetes v1.23.0 +``` + +A default file `config-sample.yaml` will be created if you do not change the name. Edit the file and here is an example of the configuration file of a Kubernetes cluster with one master node. You need to update the host information and enable Multus CNI. We use a single node for this demo, and you can also configure a multi-node Kubernetes cluster as you want. See [Multi-node installation](../../docs/installing-on-linux/introduction/multioverview/) for details. + +``` +apiVersion: kubekey.kubesphere.io/v1alpha2 +kind: Cluster +metadata: + name: sample +spec: + hosts: // updated the host template refer to this example + - {name: master1, address: 192.168.0.5, internalAddress: 192.168.0.5, password: Qcloud@123} + roleGroups: + etcd: + - master1 + master: + - master1 + worker: + - master1 + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 + kubernetes: + version: v1.23.0 + clusterName: cluster.local + network: + plugin: calico + kubePodsCIDR: 10.233.64.0/18 + kubeServiceCIDR: 10.233.0.0/18 + # multus support. https://github.com/k8snetworkplumbingwg/multus-cni + enableMultusCNI: true // Change false to true to enable Multus CNI +``` + +## Step 4: Get Started with Installation + +{{< tabs >}} + +{{< tab "If you have enabled Multus CNI" >}} + +You can run the following command to create a cluster using the configuration file. 
+ +```bash +./kk create cluster -f config-sample.yaml --container-manager containerd +``` + +{{}} + +{{< tab "If you skipped Multus CNI above" >}} + +You only need to run one command for all-in-one installation. + +```bash +./kk create cluster --with-kubernetes v1.23.0 --container-manager containerd +``` + +{{}} + +{{}} + +{{< notice note >}} + +- Supported Kubernetes versions: v1.19.8, v1.20.4, v1.21.4, v1.22.1, v1.23.0. If you do not specify a Kubernetes version, KubeKey installs Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](https://github.com/kubesphere/kubekey/blob/master/docs/kubernetes-versions.md). +- KubeKey supports AMD64 and ARM64. + +{{}} + +After you run the command, you will see a table for environment check. For details, see [Node requirements](#node-requirements) and [Dependency requirements](#dependency-requirements). Type `yes` to continue. + +## Step 5: Verify the Installation + +If the following information is displayed, Kubernetes is successfully installed. + +```bash +INFO[00:40:00 CST] Congratulations! Installation is successful. +``` + +Run the following command to check the container runtime and Kubernetes version. + +```bash +$ kubectl get node -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +i-a26jzcsm Ready control-plane,master,worker 7h56m v1.23.0 192.168.0.5 CentOS Linux 7 (Core) 3.10.0-1160.el7.x86_64 containerd://1.4.9 +``` + +Run the following command to check the Pod status.
+ +```bash +kubectl get pod -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system calico-kube-controllers-64d69886fd-c5qd9 1/1 Running 0 7h57m +kube-system calico-node-lc4fg 1/1 Running 0 7h57m +kube-system coredns-7c94484977-nvrdf 1/1 Running 0 7h57m +kube-system coredns-7c94484977-rtc24 1/1 Running 0 7h57m +kube-system kube-apiserver-i-a26jzcsm 1/1 Running 0 7h57m +kube-system kube-controller-manager-i-a26jzcsm 1/1 Running 0 7h57m +kube-system kube-multus-ds-btb42 1/1 Running 0 7h30m +kube-system kube-proxy-bntt9 1/1 Running 0 7h57m +kube-system kube-scheduler-i-a26jzcsm 1/1 Running 0 7h57m +kube-system nodelocaldns-zmx9t 1/1 Running 0 7h57m +``` + +Congratulations! You have installed a single-node Kubernetes 1.23.0 cluster with containerd and Multus CNI. For advanced usage of KubeKey, see [Installing on Linux — Overview](https://kubesphere.io/docs/installing-on-linux/introduction/intro/) for more information. \ No newline at end of file diff --git a/content/en/blogs/install-kubernetes-containerd.md b/content/en/blogs/install-kubernetes-containerd.md index 46f73adc7..58869e800 100644 --- a/content/en/blogs/install-kubernetes-containerd.md +++ b/content/en/blogs/install-kubernetes-containerd.md @@ -1,6 +1,6 @@ --- title: 'Install Kubernetes 1.22 and containerd the Easy Way' -tag: 'Kubernetes, containerd' +tag: 'Kubernetes, KubeKey' keywords: 'Kubernetes, containerd, docker, installation' description: 'Install Kubernetes and containerd in a Linux machine within minutes.' createTime: '2021-09-29' @@ -51,7 +51,7 @@ To get started with all-in-one installation, you only need to prepare one host a ### Dependency requirements -KubeKey can install Kubernetes and KubeSphere together. The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the following list to see if you need to install relevant dependencies on your node in advance.
+The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the following list to see if you need to install relevant dependencies on your node in advance. @@ -84,7 +84,7 @@ KubeKey can install Kubernetes and KubeSphere together. The dependency that need ### Network and DNS requirements - Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause some issues of DNS in the cluster. -- If your network configuration uses firewall rules or security groups, you must ensure infrastructure components can communicate with each other through specific ports. It is recommended that you turn off the firewall. For more information, see [Port Requirements](../../installing-on-linux/introduction/port-firewall/). +- If your network configuration uses firewall rules or security groups, you must ensure infrastructure components can communicate with each other through specific ports. It is recommended that you turn off the firewall. For more information, see [Port Requirements](../../docs/installing-on-linux/introduction/port-firewall/). - Supported CNI plugins: Calico and Flannel. Others (such as Cilium and Kube-OVN) may also work but note that they have not been fully tested. ## Step 2: Download KubeKey diff --git a/content/en/blogs/install-kubernetes-using-kubekey.md b/content/en/blogs/install-kubernetes-using-kubekey.md index a2bb16688..0ea0d06c0 100644 --- a/content/en/blogs/install-kubernetes-using-kubekey.md +++ b/content/en/blogs/install-kubernetes-using-kubekey.md @@ -1,5 +1,5 @@ --- -title: 'KubeKey: A Lightweight Installer for Kubernetes and Cloud Native Addons' +title: 'How to Install Kubernetes the Easy Way Using KubeKey' keywords: Kubernetes, KubeSphere, KubeKey, addons, installer description: KubeKey allows you to deploy a Kubernetes cluster in the most graceful and efficient way. 
tag: 'KubeSphere, Kubernetes, KubeKey, addons, installer' @@ -26,7 +26,7 @@ The general steps of installing Kubernetes using KubeKey: ## Prepare Hosts -I am going to create a cluster with three nodes on cloud. Here is my machine configuration for your reference: +I am going to create a Kubernetes cluster with three nodes on cloud. Here is my machine configuration for your reference: | Host IP | Host Name | Role | System | | ----------- | --------- | ------------ | ----------------------------------------- | @@ -93,7 +93,7 @@ You can use KubeKey to install a specified Kubernetes version. The dependency th The default Kubernetes version is v1.17.9. For more information about supported Kubernetes versions, see this [file](https://github.com/kubesphere/kubekey/blob/master/docs/kubernetes-versions.md). Execute the following command as an example: ```bash - ./kk create config --with-kubernetes v1.17.9 + ./kk create config --with-kubernetes v1.20.4 ``` 4. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. @@ -161,7 +161,7 @@ You can use KubeKey to install a specified Kubernetes version. The dependency th - `worker`: worker node names. - You can provide more values in this configuration file, such as `addons`. KubeKey can install all [addons](https://github.com/kubesphere/kubekey/blob/release-1.0/docs/addons.md) that can be installed as a YAML file or Chart file. For example, KubeKey does not install any storage plugin for Kubernetes by default, but you can [add your own storage systems](https://kubesphere.io/docs/installing-on-linux/persistent-storage-configurations/understand-persistent-storage/), including NFS Client, Ceph, and Glusterfs. For more information about the configuration file, see [Kubernetes Cluster Configurations](https://kubesphere.io/docs/installing-on-linux/introduction/vars/) and [this file](https://github.com/kubesphere/kubekey/blob/release-1.0/docs/config-example.md). 
+ You can provide more values in this configuration file, such as `addons`. KubeKey can install all [addons](https://github.com/kubesphere/kubekey/blob/release-1.0/docs/addons.md) that can be installed as a YAML file or Chart file. For example, KubeKey does not install any storage plugin for Kubernetes by default, but you can [add your own storage systems](https://kubesphere.io/docs/installing-on-linux/persistent-storage-configurations/understand-persistent-storage/), including NFS Client, Ceph, and GlusterFS. For more information about the configuration file, see [Kubernetes Cluster Configurations](https://kubesphere.io/docs/installing-on-linux/introduction/vars/) and [this file](https://github.com/kubesphere/kubekey/blob/release-1.0/docs/config-example.md). 6. Save the file when you finish editing and execute the following command to install Kubernetes: diff --git a/content/en/blogs/install-nfs-server-client-for-kubesphere-cluster.md b/content/en/blogs/install-nfs-server-client-for-kubesphere-cluster.md index 6531924a6..dbee58bc8 100644 --- a/content/en/blogs/install-nfs-server-client-for-kubesphere-cluster.md +++ b/content/en/blogs/install-nfs-server-client-for-kubesphere-cluster.md @@ -190,7 +190,7 @@ Now that we have our server machine ready, we need to install `nfs-common` on al 3. Specify a Kubernetes version and a KubeSphere version that you want to install. For more information about supported Kubernetes versions, see [this list](https://github.com/kubesphere/kubekey/blob/master/docs/kubernetes-versions.md). ```bash - ./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.0.0 + ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.0.0 ``` 4. A default file `config-sample.yaml` will be created if you do not customize the name. Edit the file. 
diff --git a/content/en/blogs/integrate-okta.md b/content/en/blogs/integrate-okta.md new file mode 100644 index 000000000..b54fbc1a5 --- /dev/null +++ b/content/en/blogs/integrate-okta.md @@ -0,0 +1,134 @@ +--- +title: 'Integrate KubeSphere with Okta Authentication' +keywords: Kubernetes, KubeSphere, Okta, OIDC, Authentication +description: Explore third-party authentication integration with KubeSphere. +tag: 'Kubernetes, KubeSphere, Okta, OIDC, Authentication' +createTime: '2021-12-01' +author: 'Roland Ma, Felix' +snapshot: '/images/blogs/en/okta/oidc.png' +--- + +KubeSphere, with [its latest release of 3.2.0](../kubesphere-3.2.0-ga-announcement/), provides a built-in authentication service based on [OpenID Connect](https://openid.net/connect/) (OIDC) in addition to its support for AD/LDAP and OAuth 2.0 identity authentication systems. You can easily integrate your existing identity providers that support the OIDC standard. + +This article uses [Okta](https://www.okta.com/) as an example to look into the process of how to integrate KubeSphere with an OIDC identity provider. + +## What is OpenID Connect? + +OpenID Connect (OIDC) is an identity layer built on top of the OAuth 2.0 framework. As an open authentication protocol, OIDC allows clients to verify the identity of an end user and to obtain basic user profile information. + +Curious about the major characteristics of OIDC? + +- **Use of identity tokens**. OIDC extends the authentication capabilities of OAuth by using components such as an "ID token" issued as a JSON Web Token (JWT). +- **Based on the OAuth 2.0 framework**. The ID token is obtained through a standard OAuth 2.0 flow, which also means having one protocol for authentication and authorization. +- **Simplicity**. OIDC is simple enough to integrate with basic applications, but it also has the features and security options to match demanding enterprise requirements. + +## What is Okta?
+ +Okta is a customizable, secure, and drop-in solution to add authentication and authorization services to your applications. It uses cloud software which helps organizations in managing and securing user authentications into applications. + +Okta provides you with a variety of advantages. I'll just name a few here. + +- **Single Sign-On (SSO)**. Okta’s SSO solution can quickly connect to and sync from any number of identity stores including AD, LDAP, HR systems, and other third-party identity providers. +- **Adaptive multifactor authentication**. Okta secures accounts and applications with a strong multifactor authentication solution. +- **Personalized user experience**. Okta provides ease of use for end users to access applications. + +## Practice: Integrate KubeSphere with Okta + +### Preparations + +As mentioned above, this article explains how to integrate KubeSphere with Okta. Therefore, you have to prepare a KubeSphere cluster in advance. You can take a look at [this tutorial](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/) to quickly set up your own KubeSphere cluster. + +### (Optional) Step 1: Enable HTTPS for KubeSphere web console + +For production environment, HTTPS is recommended as it provides better security. If you don't need HTTPS in your environment, you can skip this step. + +1. To enable HTTPS for your KubeSphere web console, you need to get a certificate from a Certificate Authority (CA). For example, you can apply a certificate from [Let's Encrypt](https://letsencrypt.org/). +2. [cert-manager](https://github.com/jetstack/cert-manager/) is a Kubernetes add-on to automate the management and issuance of TLS certificates from various issuing sources. To set up your cert-manager, you can take a look at [this example](https://cert-manager.io/docs/tutorials/acme/ingress/#step-5-deploy-cert-manager). I won't go into details here. + +In this article, let's use the URL https://console.kubesphere.io for accessing the KubeSphere web console. 
+ +### (Optional) Step 2: Create an Okta account + +If you already have an Okta account, you can skip this step, but you need to make sure your existing Okta account has the admin permission. If not, go to the [Okta Workforce Identity Trial](https://www.okta.com/free-trial/) page to create an account. + +1. Enter your information in the required fields and click **Get Started**. + + ![step2](/images/blogs/en/okta/step2.png) + +2. After you receive the activation email from Okta and activate your account, you can log in to Okta using the registered domain. + +3. When you log in Okta for the first time, you will be asked to set up multifactor authentication. For more information, you can refer to [Okta documentation](https://help.okta.com/en/prod/Content/Topics/Security/mfa/mfa-home.htm). + +### Step 3: Create an Okta application + +1. On the Okta admin console, select **Applications > Applications** on the left navigation pane and click **Create App Integration**. + + ![step3-1](/images/blogs/en/okta/step3-1.png) + +2. In the displayed dialog box, select **OIDC - OpenID Connect**, select **Web Application**, and click **Next**. + + ![step3-2](/images/blogs/en/okta/step3-2.png) + +3. For **General Settings**, you need to configure the following settings: + + - **App integration name**. Specify a name for your application integration. + + - **Logo (Optional)**. Add a logo for your application integration. + + - **Grant type**. Select **Authorization Code** and **Refresh Token**. + + - **Sign-in redirect URIs**. The sign-in redirect URI is where Okta sends the authentication response and ID token for a sign-in request. In this example, I won't use the wildcard `*` in the sign-in redirect URI, which needs to be set in the format of `http(s):///oauth/redirect/`. `` can be set based on your needs, but it has to be consistent with the `name` specified under the `identityProviders` section in the CRD `ClusterConfiguration`. + + - **Sign-out redirect URIs (Optional)**. 
When KubeSphere contacts Okta to close a user session, Okta redirects the user to this URI. + + - (Optional) **Controlled access**. The default access option assigns and grants access to everyone in your Okta organization for this new app integration. Besides, you can choose to limit access to selected groups and use the field to enter the names of specific groups in your organization, or skip group assignment for now and create the app without assigning a group. + + ![step3-3](/images/blogs/en/okta/step3-3.png) + + When you finish configuring your settings, click **Save** to commit your application. + +4. On the Okta application page, you can click your application to go to its details page. On the General tab, you can see the **Client ID** and **Client secret**. We will need them later on when configuring the CRD `ClusterConfiguration` on KubeSphere. + + ![step3-4](/images/blogs/en/okta/step3-4.png) + +### Step 4: Make configurations on KubeSphere + +1. Log in to KubeSphere as `admin`, move the cursor to the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. Add the following fields under `spec.authentication.jwtSecret`. + + ```yaml + spec: + authentication: + jwtSecret: '' + oauthOptions: + identityProviders: + - mappingMethod: auto + name: Okta + provider: + clientID: **** # Get from Okta + clientSecret: **** # Get from Okta + issuer: https://kubesphere.Okta.com # Your Okta domain + redirectURL: https://console.kubesphere.io/oauth/redirect/Okta + scopes: + - openid + - email + - profile + type: OIDCIdentityProvider + ``` + +3. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. The Okta login button is shown on the **Login** page of KubeSphere and you are redirected to the Okta login page when clicking it.
You will be required to register a valid username when logging in to KubeSphere for the first time.
As a rookie, it can be tough to comprehend the concepts and core principles. There are a lot of pieces that make up the system, and determining which ones are vital for your scenario might be tough. + +So, what’s the need for it?? + + +## Do we need Kubernetes? + +![why.png](/images/blogs/en/kubernetes-fundamentals-part-1/why-poster.png) + +Kubernetes is a platform for container-based application orchestration control resource allocation, and traffic management for applications and microservices in the Kubernetes ecosystem. + +As a result, many aspects of maintaining a service-oriented application infrastructure have been made easier. Kubernetes, when combined with modern continuous integration and continuous deployment (CI/CD) systems, provides the foundation for scaling these apps with minimal technical work. + +So now it's time to talk about Kubernetes' fundamental notions! + +Some concepts to understand: + +## Containers +![containers.png](/images/blogs/en/kubernetes-fundamentals-part-1/container.png) + +Containers solve a significant issue in application development. Programmers work in a development environment when they write code. When they're ready to put the code into production, this is where problems arise. The code that worked on their machine does not work in production. Differences in operating systems, dependencies, and libraries are only a few of the reasons for this. +Containers solved this fundamental problem of portability by separating code from the infrastructure it operates on. Developers may choose to package their application into a small container image that contains all of the binaries and libraries it needs to run. +Any computer with a containerization platform such as Docker or containerd can run that container in production. 
+ +## Pods +![pods.png](/images/blogs/en/kubernetes-fundamentals-part-1/pods.png) + +A Pod (as in a pod of whales or a pod of peas) is a group of one or more containers that share storage and network resources and operate according to a set of rules. A Pod's content is always co-located, scheduled, and executed in the same environment. A Pod is a "logical host" for an application that incorporates one or more tightly coupled application containers. +In a non-cloud context, applications running on the same physical or virtual computer are analogous to cloud applications running on the same logical host. + +## Nodes +![nodes.png](/images/blogs/en/kubernetes-fundamentals-part-1/nodes.png) + +A node is the smallest unit of computer hardware in Kubernetes. It's a representation of one of the computers in your cluster. Most production systems will have a node that is either a physical machine in a data center or a virtual machine housed on a cloud provider like Google Cloud Platform. Don't let traditions limit you; in theory, you can make a node out of almost anything. +Thinking of a machine as a "node" adds another degree of abstraction. Instead of worrying about each machine's characteristics, we can now just see it as a collection of CPU and RAM resources that can be utilized. Any machine in a Kubernetes cluster can be used to replace any other machine in this approach +In this, we have two terms known as: +![master-worker-node.png](/images/blogs/en/kubernetes-fundamentals-part-1/master-worker-node.png) + + +- Nodes ( Master ) +The master node controls the state of the cluster; for example, which applications are running and their corresponding container images. + +- Nodes ( Worker ) +Workloads execute in a container on physical or virtual servers. + +## Cluster +![cluster.png](/images/blogs/en/kubernetes-fundamentals-part-1/cluster.png) + +A cluster is a collection of machines on which containerized applications are run. 
Containerizing apps encapsulates an app's dependencies as well as some essential services. They are lighter and more adaptable than virtual machines. In this approach, clusters make it easier to design, move, and maintain applications. +Containers may run on numerous computers and environments, including virtual, physical, cloud-based, and on-premises, thanks to clusters. Unlike virtual machines, containers are not limited to a single operating system. Instead, they can share operating systems and execute from any location. +clusters are comprised of one master node and several worker nodes. + +## Persistent Volumes +![presistent-volumes.png](/images/blogs/en/kubernetes-fundamentals-part-1/presistent-volumes.png) + +Data can't be stored to any arbitrary location in the file system since programs operating on your cluster aren't guaranteed to run on a certain node. If a program attempts to store data to a file for later use but is then moved to a different node, the file will no longer be where the program expects it to be. As a result, each node's typical local storage is viewed as a temporary cache for holding programs, but any data saved locally cannot be expected to last. +Persistent Volumes are used by Kubernetes to store data indefinitely. While the cluster successfully pools and manages the CPU and RAM resources of all nodes, persistent file storage is not. As a Persistent Volume, local or cloud disks can be linked to the cluster. This is similar to connecting an external hard disk to the cluster. Persistent Volumes are a file system that can be mounted on the cluster without being tied to a specific node. A user's request for storage is called a PersistentVolumeClaim (PVC). It looks like a Pod. Node resources are consumed by pods, and PV resources are consumed by PVCs. Pods can request specified resource levels (CPU and Memory). The specific size and access modes might be requested in claims. 
+ +## Deployments +![deployements.png](/images/blogs/en/kubernetes-fundamentals-part-1/deployements.png) + +The basic function of a deployment is to specify how many clones of a pod should be running at any given time. When you add deployment to the cluster, it will automatically start up the necessary number of Pods and monitor them. If a pod dies, the deployment will recreate it automatically. +You don't have to deal with pods manually if you use a deployment. Simply describe the system's desired state, and it will be managed for you automatically. + +## Ingress +![ingress.png](/images/blogs/en/kubernetes-fundamentals-part-1/ingress.png) + +Ingress offers HTTP and HTTPS routes to services within the cluster from outside the cluster. Rules established on the Ingress resource control traffic routing. An Ingress can be set up to provide Services with externally accessible URLs, load balance traffic, terminate SSL/TLS, and provide name-based virtual hosting. An Ingress controller is in charge of completing the Ingress, usually with a load balancer, but it may also configure your edge router or additional front ends to assist in traffic handling. An Ingress does not disclose any ports or protocols to the public. A service of type Service is often used to expose services other than HTTP and HTTPS to the internet. + +## Interactive hands-on tutorials +So we have talked a lot about basic concepts, so now if you want to learn Kubernetes from scratch do take a look at these interactive tutorials, you can run Kubernetes and practice it in your browser. + +- [Learn Kubernetes using Interactive Browser-Based Scenarios](https://www.katacoda.com/courses/kubernetes) + +- [Install KubeSphere on Kubernetes cluster](https://www.katacoda.com/kubesphere/scenarios/install-kubesphere-on-kubernetes) + + + +## Conclusion +So at last, we have discussed all the basic concepts you need to get started to work with Kubernetes. 
If you want to start experimenting with it, then do take a look at [Kubernetes getting started docs.](https://kubernetes.io/docs/setup/) + +so get started with it and stay tuned for more such content! diff --git a/content/en/blogs/kubesphere-3.2.0-ga-announcement.md b/content/en/blogs/kubesphere-3.2.0-ga-announcement.md new file mode 100644 index 000000000..c6b73e5e1 --- /dev/null +++ b/content/en/blogs/kubesphere-3.2.0-ga-announcement.md @@ -0,0 +1,145 @@ +--- +title: 'KubeSphere 3.2.0 GA: Bringing AI-oriented GPU Scheduling and Flexible Gateway' +tag: 'KubeSphere, release' +keyword: 'Kubernetes, KubeSphere, release, AI, GPU' +description: 'KubeSphere 3.2.0 supports GPU resource scheduling and management and GPU usage monitoring, which further improves user experience in cloud-native AI scenarios. Moreover, enhanced features such as multi-cluster management, multi-tenant management, observability, DevOps, app store, and service mesh further perfect the interactive design for better user experience.' +createTime: '2021-11-03' +author: 'KubeSphere' +snapshot: '/images/blogs/en/release-announcement3.2.0/v3.2.0-GA-cover.png' +--- + +![3.2.0GA](/images/blogs/en/release-announcement3.2.0/3.2.0GA.png) + +No one would ever doubt that **Cloud Native** has become the most popular service technology. KubeSphere, a distributed operating system for cloud-native application management with Kubernetes as its kernel, is definitely one of the tide riders surfing the cloud-native currents. KubeSphere has always been upholding the commitment of 100% open source. Owing to support from the open-source community, KubeSphere has rapidly established a worldwide presence. + +On November 2, 2021, we are excited to announce KubeSphere 3.2.0 is generally available! + +In KubeSphere 3.2.0, **GPU resource scheduling and management** and GPU usage monitoring further improve user experience in cloud-native AI scenarios. 
Moreover, enhanced features such as **multi-cluster management**, **multi-tenant management** , **observability**, **DevOps**, **app store, and service mesh** further perfect the interactive design for better user experience. + +It's also worth pointing out that KubeSphere 3.2.0 would not be possible without participation and contribution from enterprises and users outside QingCloud. You are everywhere, from feature development, test, defect report, proposal, best practice collection, bug fixing, internationalization to documentation. We appreciate your help and will give an acknowledgement at the end of the article. + +## **What's New in KubeSphere 3.2.0** + +### **GPU scheduling and quota management** + +With the rapid development of artificial intelligence (AI) and machine learning, more and more AI companies are calling for GPU resource scheduling and management features for server clusters, especially monitoring of GPU usage and management of GPU resource quotas. To address users' pain points, KubeSphere 3.2.0 makes our original GPU management even easier. + +KubeSphere 3.2.0 allows you to create GPU workloads on the GUI, schedule GPU resources, and manage GPU resource quotas by tenant. Specifically, it can be used for NVIDIA GPU and vGPU solutions. + +![00-GPU-scheduling-quota-manage](/images/blogs/en/release-announcement3.2.0/00-GPU-scheduling-quota-manage.png) + +### **Enhanced Kubernetes observability** + +Growing container and microservice technologies make it more complex to call components between systems, and the number of processes running in the system is also surging. With thousands of processes running in a distributed system, it is clear that conventional monitoring techniques are incapable of tracking the dependencies and calling paths between these processes, and this is where observability within the system becomes particularly important. 
+ +***Observability is the ability to measure the internal states of a system by examining its outputs.*** A system is considered "observable" if the current state can be estimated by only using information from outputs, namely telemetry data collected by the three pillars of observability: logging, tracing and metrics. + +1. More powerful custom monitoring dashboards + +KubeSphere 3.1.0 has added the cluster-level custom monitoring feature, which allows you to generate custom Kubernetes monitoring dashboards by selecting a default template, uploading a template, or customizing a template. KubeSphere 3.2.0 provides a default template for creating a Grafana monitoring dashboard. You can import a Grafana monitoring dashboard by specifying the URL or uploading the JSON file of the dashboard, and then KubeSphere will automatically convert the Grafana monitoring dashboard into a custom monitoring dashboard. + +![01-Grafana-dashboard](/images/blogs/en/release-announcement3.2.0/01-Grafana-dashboard.png) + +For GPU resources, KubeSphere 3.2.0 also provides a default monitoring template with a wealth of metrics, so that you don't need to customize a template or edit a YAML file. + +![02-GPU-overview](/images/blogs/en/release-announcement3.2.0/02-GPU-overview.png) + +2. Alerting and logging + +- KubeSphere 3.2.0 supports communication with Elasticsearch through HTTPS. + +- In addition to the various notification channels such as email, DingTalk, WeCom, webhook, and Slack, KubeSphere 3.2.0 now also allows you to test and validate the notification channels you configure. + +![03-platform-settings](/images/blogs/en/release-announcement3.2.0/03-platform-settings.png) + +3. On the etcd monitoring page, the system automatically adds the `Leader` tag to the etcd leader. + +### **Multi-cloud and multi-cluster management** + +CNCF Survey 2020 shows that over 80% of users run more than two Kubernetes clusters in their production environment. 
KubeSphere aims at addressing multi-cluster and multi-cloud challenges. It provides a unified control plane and supports distributing applications and replicas to multiple Kubernetes clusters deployed across public cloud and on-premises environments. Moreover, KubeSphere supports observability across clusters, including features such as multi-dimensional monitoring, logging, events, and auditing logs. + +![04-cluster-manage](/images/blogs/en/release-announcement3.2.0/04-cluster-manage.png) + +KubeSphere 3.2.0 performs better in cross-cluster scheduling. When you are creating a federated Deployment across clusters, you can directly specify the number of replicas scheduled to each cluster. In addition, you can also specify the total number of replicas and weight of each cluster, and allow the system to automatically schedule replicas to each cluster according its weight. This feature is pretty helpful when you want to flexibly scale your Deployment and proportionally distribute replicas to multiple clusters. + +![05-federated-deployment](/images/blogs/en/release-announcement3.2.0/05-federated-deployment.png) + +![06-view-federation](/images/blogs/en/release-announcement3.2.0/06-view-federation.png) + +### **Operations-and-maintenance-friendly storage management** + +Enterprises running Kubernetes in production focus on persistent storage, as stable and reliable storage underpin their core data. On the KubeSphere 3.2.0 web console, the **Volumes** feature allows the administrator to decide whether to enable volume cloning, snapshot capturing, and volume expansion, making persistent storage operations and maintenance for stateful apps more convenient. + +![07-volume-manage](/images/blogs/en/release-announcement3.2.0/07-volume-manage.png) + +The default immediate binding mode binds a volume to a backend storage device immediately when the volume is created. This mode does not apply to storage devices with topology limits and may cause Pod scheduling failures. 
KubeSphere 3.2.0 provides the delayed binding mode to address this issue, which guarantees that a volume (PVC) is bound to a volume instance (PV) only after the volume is mounted to a Pod. This feature ensures that resources are properly scheduled based on Pod resource requests. + +![08-storage-class-settings](/images/blogs/en/release-announcement3.2.0/08-storage-class-settings.png) + +In addition to volume management, KubeSphere 3.2.0 now also supports Persistent Volume management, and you can view Persistent Volume information, edit Persistent Volumes, and delete Persistent Volumes on the web console. + +![09-volumes](/images/blogs/en/release-announcement3.2.0/09-volumes.png) + +When you create a volume snapshot, you can specify the snapshot type (`VolumeSnapshotClass`) to use a specific storage backend. + +### **Cluster gateway** + +KubeSphere 3.1 supports only project gateways, which require multiple IP addresses when there are multiple projects. Additionally, gateways in different workspaces are independent. + +KubeSphere 3.2.0 provides a cluster gateway, which means that all projects can share the same gateway. Existing project gateways are not affected by the cluster gateway. + +![10-gateway-settings](/images/blogs/en/release-announcement3.2.0/10-gateway-settings.png) + +The administrator can directly manage and configure all project gateways on the cluster gateway settings page without having to go to each workspace. The Kubernetes ecosystem provides many ingress controllers that can be used as the gateway solution. In KubeSphere 3.2.0, the gateway backend is refactored, which allows you to use any ingress controllers that support v1\\ingress as the gateway solution. + +![11-gateway-settings2](/images/blogs/en/release-announcement3.2.0/11-gateway-settings2.png) + +### **Authentication and authorization** + +A unified and all-round identity management and authentication system is indispensable for logical isolation in a multi-tenant system. 
Apart from support for AD/LDAP and OAuth 2.0 identity authentication systems, KubeSphere 3.2.0 also provides a built-in authentication service based on OpenID Connect to provide authentication capability for other components. OpenID Connect is a simple user identity authentication protocol based on OAuth 2.0 with a bunch of features and security options to meet enterprise-grade business requirements. + +### **App Store open to community partners** + +The App Store and application lifecycle management are unique features of KubeSphere, which are based on self-developed and open-source [OpenPitrix](https://github.com/openpitrix/openpitrix). + +KubeSphere 3.2.0 adds the feature of **dynamically loading community-developed Helm charts into the KubeSphere App Store.** You can send a pull request containing the Helm chart of a new app to the App Store chart repository. After the pull request is merged, the app is automatically loaded to the App Store regardless of the KubeSphere version. Welcome to submit your Helm charts to https://github.com/kubesphere/helm-charts. Nocalhost and Chaos Mesh have integrated their Helms charts into KubeSphere 3.2.0 by using this method, and you can easily install them to your Kubernetes clusters by one click. + +![12-app-store](/images/blogs/en/release-announcement3.2.0/12-app-store.png) + +### **More independent Kubernetes DevOps (on KubeSphere)** + +Kubernetes DevOps (on KubeSphere) has developed into an independent project [ks-devops](https://github.com/kubesphere/ks-devops) in KubeSphere v3.2.0, which is intended to allow users to run Kubernetes DevOps (on KubeSphere) in any Kubernetes clusters. Currently, you can use a Helm chart to install the backend components of ks-devops. + +Jenkins is a CI engine with a large user base and a rich plug-in ecosystem. In KubeSphere 3.2.0, we will let Jenkins do what it is good at—functioning only as an engine in the backend to provide stable pipeline management capability. 
A newly added CRD PipelineRun encapsulates run records of pipelines, which reduces the number of APIs required for directly interacting with Jenkins and boosts performance of CI pipelines. + +Starting from KubeSphere v3.2.0, Kubernetes DevOps (on KubeSphere) allows you to build images by using pipelines based on containerd. As an independent project, Kubernetes DevOps (on KubeSphere) will support independent deployment of the backend and frontend, introduce GitOps tools such as Tekton and ArgoCD, as well as integrate project management and test management platforms. + +### **Flexible Kubernetes cluster deployment** + +If you do not have a Kubernetes cluster, you can use KubeKey to install both Kubernetes and KubeSphere; if you already have a Kubernetes cluster, you can use ks-installer to install KubeSphere only. + +[KubeKey](https://github.com/kubesphere/kubekey) is an efficient open-source installer, which uses Docker as the default container runtime. It can also use CRI runtimes such as containerd, CRI-O, and iSula. You can use KubeKey to deploy an etcd cluster independent of Kubernetes for better flexibility. + +KubeKey provides the following new features: + +- Supports the latest Kubernetes version 1.22.1 (backward compatible with 4 earlier versions); supports deployment of K3s (experimental). + +- Supports automatic renewal of Kubernetes cluster certificates. + +- Supports a high availability deployment mode that uses an internal load balancer to reduce the complexity of cluster deployment. + +- Most of the integrated components such as Istio, Jaeger, Prometheus Operator, Fluent Bit, KubeEdge, and Nginx ingress controller have been updated to the latest. For more information, refer to [Release Notes 3.2.0](https://kubesphere.io/docs/release/release-v320/). 
+ +### **Better user experience** + +To provide a user-friendly web console for global users, our SIG Docs members have refactored and optimized the UI text on the web console to deliver more professional and accurate UI text and terms. Hard-coded and concatenated UI strings are removed for better UI localization and internationalization support. + +Some heavy users in the KubeSphere community have participated in enhancing some frontend features. For example, KubeSphere now supports searching for images in a Harbor registry and mounting volumes to init containers, and the feature of automatic workload restart during volume expansion is removed. + +For more information about user experience optimization, enhanced features, and fixed bugs, please refer to [Release Notes 3.2.0](https://kubesphere.io/docs/release/release-v320/). You can download and install KubeSphere 3.2.0 by referring to [All-in-One on Linux](https://kubesphere.io/docs/quick-start/all-in-one-on-linux/ ) and [Minimal KubeSphere on Kubernetes](https://kubesphere.io/docs/quick-start/minimal-kubesphere-on-k8s/), and we will offer an offline installation solution in the KubeSphere community in one week. + +## **Acknowledgements** + +The KubeSphere team would like to acknowledge contributions from the people who make KubeSphere 3.2.0 possible. The following GitHub IDs are not listed in order. If you are not listed, please contact us. 
+ +![v3.2.0-contributors](/images/blogs/en/release-announcement3.2.0/v3.2.0-contributors.png) diff --git a/content/en/blogs/openelb-joins-cncf-sandbox-project.md b/content/en/blogs/openelb-joins-cncf-sandbox-project.md new file mode 100644 index 000000000..331181160 --- /dev/null +++ b/content/en/blogs/openelb-joins-cncf-sandbox-project.md @@ -0,0 +1,85 @@ +--- +title: 'OpenELB Joins the CNCF Sandbox, Making Service Exposure in Private Environments Easier' +tag: 'CNCF' +keyword: 'OpenELB, Kubernetes, LoadBalancer, Bare metal server' +description: 'CNCF accepted OpenELB, a load balancer plugin open sourced by KubeSphere, into the CNCF Sandbox' +createTime: '2021-11-24' +author: 'KubeSphere' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/4761636694917_.pic_hd.jpg' +--- + +![Cover](https://kubesphere-community.pek3b.qingstor.com/images/4761636694917_.pic_hd.jpg) + +On November 10, the Cloud Native Computing Foundation (CNCF) accepted OpenELB, a load balancer plugin open sourced by KubeSphere, into the CNCF Sandbox. + +![Diagram](https://kubesphere-community.pek3b.qingstor.com/images/8471636692467_.pic_hd.jpg) + +OpenELB, formerly known as "PorterLB", is a load balancer plugin designed for bare metal servers, edge devices, and private environments. It serves as an LB plugin for Kubernetes, K3s, and KubeSphere to expose LoadBalancer services to outside the cluster. 
OpenELB provides the following core functions: +- Load balancing in BGP mode and Layer 2 mode +- ECMP-based load balancing +- IP address pool management +- BGP configurations using CRDs + +![Architecture](https://kubesphere-community.pek3b.qingstor.com/images/8441636691354_.pic_hd.jpg) + +## Why Did We Initiate OpenELB +In the KubeSphere community, we surveyed over 5,000 users to find out environments that they use to deploy Kubernetes, and the result shows that nearly 36% of the users deploy Kubernetes on bare metal servers, and many users install and use Kubernetes or K3s on air-gapped data centers or edge devices. In private environments, exposing LoadBalancer services is difficult. +![User surveys](https://kubesphere-community.pek3b.qingstor.com/images/8401636689164_.pic.jpg) + +In Kubernetes clusters, LoadBalancer services can be used to expose backend workloads to outside the cluster. Cloud vendors usually provide cloud-based LB plugins, which requires users to deploy their clusters on specific IaaS platforms. However, most enterprise users deploy Kubernetes clusters on bare metal servers, especially when these clusters are used in production. For private environments with bare metal servers and edge clusters, Kubernetes does not provide a LoadBalancer solution. + +OpenELB is designed to expose LoadBalancer services in non-public-cloud Kubernetes clusters. It provides easy-to-use EIPs and makes IP address pool management easier for users in private environments. +## OpenELB Adopters and Contributors +Currently, OpenELB has been used in production environments by many enterprises, such as BENLAI, Suzhou TV, CVTE, Wisdom World, Jollychic, QingCloud, BAIWANG, Rocketbyte, and more. At the end of 2019, BENLAI has used an earlier version of OpenELB in production. Now, OpenELB has attracted 13 contributors and more than 100 community members. 
+![Enterprises using OpenELB](https://kubesphere-community.pek3b.qingstor.com/images/8411636689286_.pic_hd.jpg) + +## Differences Between OpenELB and MetalLB +MetalLB is also a CNCF Sandbox project. It was launched at the end of 2017, and has been widely accepted by the community up to now. As a relatively young project, OpenELB is more Kubernetes-native. Thanks to contributions from the community, OpenELB has released eight versions and supported multiple routing methods. The following describes differences between OpenELB and MetalLB. +### Cloud-native architecture +In OpenELB, you can use CRDs to manage IP addresses and BGP settings. OpenELB is user-friendly for those who are familiar with kubectl. You can also directly use Kubernetes APIs to further customize OpenELB. In MetalLB, you can only manage IP addresses and BGP settings by using configmaps and obtain their status from logs. +### Flexible IP address management + +OpenELB manages IP addresses by using the Eip CRD. It defines the status sub-resource to store the assignment status of IP addresses, which prevents conflicts among replicas and simplifies the programming logic. + +### Advertise routes using GoBGP + +MetalLB implements BGP by itself, while OpenELB implements BGP by using GoBGP, which has the following advantages: + +- Low development cost and robust support from the GoBGP community +- Rich features of GoBGP +- Dynamic configuration of GoBGP by using the BgpConf and BgpPeer CRDs, and the latest configurations are automatically loaded without OpenELB restart +- When GoBGP is used as a library, the community provides Protocol Buffers (Protobuf) APIs. 
OpenELB references these APIs when implementing the BgpConf and BgpPeer CRDs and remains compatible with GoBGP
In July, 2021, the KubeSphere team donated Fluentbit Operator as a CNCF sub-project to the Fluent community. Now OpenELB, which was initiated by the KubeSphere team, also joins the CNCF sandbox. In the future, the KubeSphere team will serve as one of participants of the OpenELB project and maintain its commitment to open source. We will continue to work closely with all partners in the containerization field to build a vendor-neutral and open-source OpenELB community and ecosystem. Join the OpenELB community, tell us your experience when using OpenELB, and contribute to the OpenELB project! + +- ✨ GitHub: [https://github.com/kubesphere/openelb/](https://github.com/kubesphere/openelb/) +- 💻 Official website: [https://openelb.github.io/](https://openelb.github.io/) +- 🙋 Slack channel: kubesphere.slack.com + diff --git a/content/en/blogs/serverless-way-for-kubernetes-log-alert.md b/content/en/blogs/serverless-way-for-kubernetes-log-alert.md new file mode 100644 index 000000000..19b1d02d4 --- /dev/null +++ b/content/en/blogs/serverless-way-for-kubernetes-log-alert.md @@ -0,0 +1,428 @@ +--- +title: 'Serverless Use Case: Elastic Kubernetes Log Alerts with OpenFunction and Kafka' +tag: 'OpenFunction, KubeSphere, Kubernetes' +keywords: 'OpenFunction, Serverless, KubeSphere, Kubernetes, Kafka, FaaS' +description: 'This blog post offers ideas for serverless log processing, which reduces the link cost while improving flexibility.' +createTime: '2021-08-26' +author: 'Fang Tian, Bettygogo' +snapshot: '/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/kubesphere snapshot.png' +--- + +## Overview + +How do you handle container logs collected by the message server? You may face a dilemma: Deploying a dedicated log processing workload can be costly, and it is difficult to assess the number of standby log processing workloads required when the quantity of logs fluctuates sharply. 
This blog post offers ideas for serverless log processing, which reduces the link cost while improving flexibility. + +Our general design idea is to add a Kafka server as a log receiver, and then use the log input to the Kafka server as an event to drive the serverless workloads to handle logs. Roughly, the following steps are involved: + +1. Set up a Kafka server as the log receiver for Kubernetes clusters. +2. Deploy OpenFunction to provide serverless capabilities for log processing workloads. +3. Write log processing functions to grab specific logs to generate alerting messages. +4. Configure [Notification Manager](https://github.com/kubesphere/notification-manager/) to send alerts to Slack. + +![](https://pek3b.qingstor.com/kubesphere-community/images/202108261124546.png) + +In this scenario, we will make use of the serverless capabilities of[ OpenFunction](https://github.com/OpenFunction/OpenFunction). + +> [OpenFunction](https://github.com/OpenFunction/OpenFunction) is an open-source FaaS (serverless) project initiated by the KubeSphere community. It is designed to allow users to focus on their business logic without the hassle of caring about the underlying operating environment and infrastructure. Currently, the project provides the following key capabilities: +> +> - Builds OCI images from Dockerfile or Buildpacks. +> - Runs serverless workloads using Knative Serving or OpenFunctionAsync (backed by KEDA + Dapr) as a runtime. +> - Equipped with a built-in event-driven framework. + +## Use Kafka as a Log Receiver + +First, enable the **logging** component for the KubeSphere platform (For more information, please refer to[ Enable Pluggable Components](https://kubesphere.io/docs/pluggable-components/). Next, we can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) to build a minimal Kafka server. + +1. 
In the `default` namespace, install [strimzi-kafka-operator.](https://github.com/strimzi/strimzi-kafka-operator) + + ```shell + helm repo add strimzi https://strimzi.io/charts/ + helm install kafka-operator -n default strimzi/strimzi-kafka-operator + ``` + +2. Run the following commands to create a Kafka cluster and a Kafka topic in the `default` namespace. The storage type of the created Kafka and ZooKeeper clusters is **ephemeral**. Here, we use `emptyDir` for demonstration. + + > Note that we have created a topic named `logs` for follow-up use. + + ```shell + cat < ****Cluster Management****. + + > If you have enabled the [multi-cluster feature](https://kubesphere.io/docs/multicluster-management/), you need to select a cluster. + +2. On the ****Cluster Management**** page, click **Log Collections** under ****Cluster Settings****. + +3. Click **Add Log Receiver,** and then click **Kafka**. Enter the service address and port number of Kafka, and then click ****OK****. + +![add-log-receiver](/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/add-log-receiver.png) + +4. Run the following commands to verify that Kafka clusters can collect logs from Fluent Bit. + + ```shell + # Starts a utility pod. + $ kubectl run utils --image=arunvelsriram/utils -i --tty --rm + # Checks logs in the `logs` topic + $ kafkacat -C -b kafka-logs-receiver-kafka-0.kafka-logs-receiver-kafka-brokers.default.svc:9092 -t logs + ``` + +## Deploy OpenFunction + +According to the design in Overview, we need to deploy OpenFunction first. As OpenFunction has referenced multiple third-party projects, such as Knative, Tekton, ShipWright, Dapr, and KEDA, it is cumbersome if you manually deploy it. It is recommended that you refer to [Prerequisites](https://github.com/OpenFunction/OpenFunction#prerequisites) to quickly deploy dependencies of OpenFunction. 
+ +> In the command, `--with-shipwright` means that Shipwright is deployed as the build driver for the function; `--with-openFuncAsync` means that OpenFuncAsync Runtime is deployed as the load driver for the function. When you have limited access to GitHub and Google, you can add the `--poor-network` parameter to download related components. + +```shell +sh hack/deploy.sh --with-shipwright --with-openFuncAsync --poor-network +``` + +Deploy OpenFunction. + +> We install the latest stable version here. Alternatively, you can use the development version. For more information, please refer to the[ Install OpenFunction](https://github.com/OpenFunction/OpenFunction#install) section. +> +> To make sure that Shipwright works properly, we provide a default build policy, and you can run the following commands to set the policy. +> +> ```shell +> kubectl apply -f https://raw.githubusercontent.com/OpenFunction/OpenFunction/main/config/strategy/openfunction.yaml +> ``` + +```shell +kubectl apply -f https://github.com/OpenFunction/OpenFunction/releases/download/v0.3.0/bundle.yaml +``` + +## Write a Log Processing Function + +In this example, we install WordPress as the log producer. The application's workload resides in the `demo-project` namespace and the Pod's name is `wordpress-v1-f54f697c5-hdn2z`. 
+ +When a request returns **404**, the log content is as follows: + +```json +{"@timestamp":1629856477.226758,"log":"*.*.*.* - - [25/Aug/2021:01:54:36 +0000] \"GET /notfound HTTP/1.1\" 404 49923 \"-\" \"curl/7.58.0\"\n","time":"2021-08-25T01:54:37.226757612Z","kubernetes":{"pod_name":"wordpress-v1-f54f697c5-hdn2z","namespace_name":"demo-project","container_name":"container-nrdsp1","docker_id":"bb7b48e2883be0c05b22c04b1d1573729dd06223ae0b1676e33a4fac655958a5","container_image":"wordpress:4.8-apache"}} + +``` + +Here are our needs: When a request returns **404**, the Notification Manager sends a notification to the receiver (Configure a Slack alert receiver according to [Configure Slack Notifications](https://kubesphere.io/docs/cluster-administration/platform-settings/notification-management/configure-slack/), and records the namespace, Pod name, request path, request method, and other information. Therefore, we write a simple function: + +> You can learn how to use `openfunction-context` from [OpenFunction Context Spec,](https://github.com/OpenFunction/functions-framework/blob/main/docs/OpenFunction-context-specs.md) which is a tool library provided by OpenFunction for writing functions. You can learn more about OpenFunction functions from [OpenFunction Samples.](https://github.com/OpenFunction/samples) + +```go +package logshandler + +import ( + "encoding/json" + "fmt" + "log" + "regexp" + "time" + + ofctx "github.com/OpenFunction/functions-framework-go/openfunction-context" + alert "github.com/prometheus/alertmanager/template" +) + +const ( + HTTPCodeNotFound = "404" + Namespace = "demo-project" + PodName = "wordpress-v1-[A-Za-z0-9]{9}-[A-Za-z0-9]{5}" + AlertName = "404 Request" + Severity = "warning" +) + +// The ctx parameter of the LogHandler function provides a context handle for user functions in the cluster. For example, ctx.SendTo is used to send data to a specified destination. 
+// The in parameter in the LogsHandler function is used to pass byte data (if any) from the input to the function. +func LogsHandler(ctx *ofctx.OpenFunctionContext, in []byte) int { + content := string(in) + // We set three regular expressions here for matching the HTTP status code, resource namespace, and Pod name of resources, respectively. + matchHTTPCode, _ := regexp.MatchString(fmt.Sprintf(" %s ", HTTPCodeNotFound), content) + matchNamespace, _ := regexp.MatchString(fmt.Sprintf("namespace_name\":\"%s", Namespace), content) + matchPodName := regexp.MustCompile(fmt.Sprintf(`(%s)`, PodName)).FindStringSubmatch(content) + + if matchHTTPCode && matchNamespace && matchPodName != nil { + log.Printf("Match log - Content: %s", content) + + // If the input data matches all three regular expressions above, we need to extract some log information to be used in the alert. + // The alert contains the following information: HTTP method of the 404 request, HTTP path, and Pod name. + match := regexp.MustCompile(`([A-Z]+) (/\S*) HTTP`).FindStringSubmatch(content) + if match == nil { + return 500 + } + path := match[len(match)-1] + method := match[len(match)-2] + podName := matchPodName[len(matchPodName)-1] + + // After we collect major information, we can use the data struct of alertmanager to compose an alert.
+ notify := &alert.Data{ + Receiver: "notification_manager", + Status: "firing", + Alerts: alert.Alerts{}, + GroupLabels: alert.KV{"alertname": AlertName, "namespace": Namespace}, + CommonLabels: alert.KV{"alertname": AlertName, "namespace": Namespace, "severity": Severity}, + CommonAnnotations: alert.KV{}, + ExternalURL: "", + } + alt := alert.Alert{ + Status: "firing", + Labels: alert.KV{ + "alertname": AlertName, + "namespace": Namespace, + "severity": Severity, + "pod": podName, + "path": path, + "method": method, + }, + Annotations: alert.KV{}, + StartsAt: time.Now(), + EndsAt: time.Time{}, + GeneratorURL: "", + Fingerprint: "", + } + notify.Alerts = append(notify.Alerts, alt) + notifyBytes, _ := json.Marshal(notify) + + // Use ctx.SendTo to send the content to the "notification-manager" output (you can find its definition in the following logs-handler-function.yaml function configuration file. + if err := ctx.SendTo(notifyBytes, "notification-manager"); err != nil { + panic(err) + } + log.Printf("Send log to notification manager.") + } + return 200 +} + +``` + +Upload this function to the code repository and record the ****URL of the code repository**** and the **path of the code in the repository**, which will be used in the **Create a function** step. + +> You can find this case in [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/OpenFuncAsync/logs-handler-function). + +## Create a Function + +Use OpenFunction to build the above function. 
First, set up a key file `push-secret` to access the image repository (After the OCI image is constructed using the code, OpenFunction will upload the image to the image repository for subsequent load startup.): + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +Apply the function configuration file `logs-handler-function.yaml`. + +> The function definition explains the use of two key components: +> +> [Dapr](https://dapr.io/) shields complex middleware from applications, making it easy for the `logs-handler` function to handle Kafka events. +> +> [KEDA](https://keda.sh/) drives the startup of the `logs-handler` function by monitoring event traffic in the message server, and dynamically extends the `logs-handler` instance based on the consumption delay of Kafka messages. + +```yaml +apiVersion: core.openfunction.io/v1alpha1 +kind: Function +metadata: + name: logs-handler +spec: + version: "v1.0.0" + # Defines the upload path for the built image. + image: openfunctiondev/logs-async-handler:v1 + imageCredentials: + name: push-secret + build: + builder: openfunctiondev/go115-builder:v0.2.0 + env: + FUNC_NAME: "LogsHandler" + # Defines the path of the source code. + # url specifies the URL of the above-mentioned code repository. + # sourceSubPath specifies the path of the code in the repository. + srcRepo: + url: "https://github.com/OpenFunction/samples.git" + sourceSubPath: "functions/OpenFuncAsync/logs-handler-function/" + serving: + # OpenFuncAsync is an event-driven, asynchronous runtime implemented in OpenFunction by using KEDA_Dapr. + runtime: "OpenFuncAsync" + openFuncAsync: + # This section defines the function input (kafka-receiver) and the output (notification-manager), which correspond to definitions in the components section. 
+ dapr: + inputs: + - name: kafka-receiver + type: bindings + outputs: + - name: notification-manager + type: bindings + params: + operation: "post" + type: "bindings" + annotations: + dapr.io/log-level: "debug" + # This section defines the above-mentioned input and output (that is, Dapr Components). + components: + - name: kafka-receiver + type: bindings.kafka + version: v1 + metadata: + - name: brokers + value: "kafka-logs-receiver-kafka-brokers:9092" + - name: authRequired + value: "false" + - name: publishTopic + value: "logs" + - name: topics + value: "logs" + - name: consumerGroup + value: "logs-handler" + # This is the URL of KubeSphere notification-manager. + - name: notification-manager + type: bindings.http + version: v1 + metadata: + - name: url + value: http://notification-manager-svc.kubesphere-monitoring-system.svc.cluster.local:19093/api/v2/alerts + keda: + scaledObject: + pollingInterval: 15 + minReplicaCount: 0 + maxReplicaCount: 10 + cooldownPeriod: 30 + # This section defines the trigger of the function, that is, the log topic of the Kafka server. + # This section also defines the message lag threshold (the value is 10), which means that when the number of lagged messages exceeds 10, the number of logs-handler instances will automatically scale out. + triggers: + - type: kafka + metadata: + topic: logs + bootstrapServers: kafka-logs-receiver-kafka-brokers.default.svc.cluster.local:9092 + consumerGroup: logs-handler + lagThreshold: "10" +``` + +## Demonstrate the Result + +Disable the Kafka log receiver first: On the ****Log Collections**** page, click **Kafka** to go to the details page, and choose **More** > **Change Status** > **Close**. + +Wait for a while, and then it can be observed that number of instances of the `logs-handler` function has reduced to 0. + +Then set the status of the Kafka log receiver to **Collecting**, and `logs-handler` also starts. 
+ +```shell +~# kubectl get po --watch +NAME READY STATUS RESTARTS AGE +kafka-logs-receiver-entity-operator-568957ff84-tdrrx 3/3 Running 0 7m27s +kafka-logs-receiver-kafka-0 1/1 Running 0 7m48s +kafka-logs-receiver-zookeeper-0 1/1 Running 0 8m12s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 34s +strimzi-cluster-operator-687fdd6f77-kc8cv 1/1 Running 0 10m +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 36s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 37s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 2s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 1/2 Running 0 4s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 2/2 Running 0 11s +``` + +Next, initiate a request for a non-existent path of the WordPress application: + +```shell +curl http://<wordpress-address>/notfound +``` + +You can see that Slack has received this message (Slack will not receive an alert message when we visit the WordPress site properly). + +![](https://i.imgur.com/YQc5uOq.png) + +### Explore More Possibilities + +We can further discuss a solution using synchronous functions: + +To use Knative Serving properly, we need to set the load balancer address of its gateway. (You can use the local address as a workaround.) + +```bash +# Replace the following "1.2.3.4" with the actual values.
+$ kubectl patch svc -n kourier-system kourier \ +-p '{"spec": {"type": "LoadBalancer", "externalIPs": ["1.2.3.4"]}}' + +$ kubectl patch configmap/config-domain -n knative-serving \ +--type merge --patch '{"data":{"1.2.3.4.sslip.io":""}}' +``` + +OpenFunction drives the running of the `Knative` function in two ways: (1) Use the Kafka server in asynchronous mode; (2) Use its own event framework to connect to the Kafka server, and then operate in Sink mode. You can refer to the case in [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/Knative/logs-handler-function). + +In this solution, the processing speed of synchronous functions is lower than that of asynchronous functions. We can also use KEDA to trigger the concurrency mechanism of Knative Serving, but it is not as convenient as asynchronous functions. (In the future, we will optimize the OpenFunction event framework to make up for the shortcomings of synchronous functions.) + +It can be seen that different types of serverless functions have their unique advantages depending on task scenarios. For example, when it comes to handling an orderly control flow function, a synchronous function outperforms an asynchronous function. + +## Summary + +Serverless matches our expectations for rapid disassembly and reconstruction of business scenarios. + +As you can see in this case, OpenFunction not only increases flexibility of log processing and alert notification links by using the serverless technology, but also uses a function framework to simplify complex setups typically required to connect to Kafka into semantically clear code. Moreover, we are also continuously developing OpenFunction so that components can be powered by our own serverless capabilities in follow-up releases.
\ No newline at end of file diff --git a/content/en/blogs/set-up-ha-cluster-using-keepalived-haproxy.md b/content/en/blogs/set-up-ha-cluster-using-keepalived-haproxy.md index bf2e43cba..e5844e31c 100644 --- a/content/en/blogs/set-up-ha-cluster-using-keepalived-haproxy.md +++ b/content/en/blogs/set-up-ha-cluster-using-keepalived-haproxy.md @@ -279,7 +279,7 @@ Before you start to create your Kubernetes cluster, make sure you have tested th 3. Create a configuration file to specify cluster information. The Kubernetes version I am going to install is `v1.17.9`. ```bash - ./kk create config --with-kubernetes v1.17.9 + ./kk create config --with-kubernetes v1.20.4 ``` 4. A default file `config-sample.yaml` will be created. Edit the file and here is my configuration for your reference: diff --git a/content/en/blogs/transform-traditional-applications-into-microservices.md b/content/en/blogs/transform-traditional-applications-into-microservices.md new file mode 100644 index 000000000..8caf9935f --- /dev/null +++ b/content/en/blogs/transform-traditional-applications-into-microservices.md @@ -0,0 +1,227 @@ +--- +title: 'Transform Traditional Applications into Microservices to Enable the Traffic Monitoring Feature' +tag: 'KubeSphere, Kubernetes, Microservices' +keywords: 'KubeSphere, Kubernetes, Microservices, Traffic Monitoring, Service Mesh' +description: 'This article describes how to transform a traditional application into microservices to use service mesh features, such as grayscale release, traffic monitoring, and tracing.' +createTime: '2021-12-21' +author: 'Zackzhang, Bettygogo' +snapshot: '/images/blogs/transform-traditional-applications-into-microservices/traffic-monitoring-cover.png' +--- + +## Challenges + +When trying to use service mesh of Kubernetes, most KubeSphere users only manage to deploy a Bookinfo sample on KubeSphere.
They are also struggling in understanding how to explore the full capabilities of service mesh, let alone transform traditional applications into microservices. + +This article describes how to transform a traditional application into microservices to use service mesh features, such as grayscale release, traffic monitoring, and tracing. + +## KubeSphere Microservices + +KubeSphere microservices use the application CRD to abstract associated resources into a concrete application, and provide traffic monitoring, grayscale release, and tracing features with the help of Istio's application features. Moreover, KubeSphere microservices shield complex DestinationRule and VirtualService of Istio and automatically update resources according to traffic monitoring settings and grayscale release policies. + +Prerequisites for using KubeSphere microservices are as follows: + +1. A deployment must contain labels `app` and `version`, and a service must contain the `app` label. The app labels (equivalent to a service name) of the deployment and the service must be the same (required by Istio). + +2. All resources of an application must contain labels app.kubernetes.io/name= and app.kubernetes.io/version= (required by the application). + +3. A deployment name must consist of a service name followed by v1. For example, when the service name is nginx, the deployment name is nginx-v1. + +4. The deployment template must contain annotations (required during automatic sidecar injection of Istio). + +```bash + template: + metadata: + annotations: + sidecar.istio.io/inject: "true" +``` + +5. The service and deployment contain annotations. The KubeSphere CRD Controller automatically matches VirtualService and DestinationRules to the service. 
+ +```bash +# Service +kind: Service +metadata: + annotations: + servicemesh.kubesphere.io/enabled: "true" + +# Deployment +kind: Deployment +metadata: + annotations: + servicemesh.kubesphere.io/enabled: "true" +``` + +## Example + +To implement traffic monitoring, two independent applications are required (for example, WordPress and MySQL). After the two applications work properly, we then transform them into KubeSphere microservices and inject the sidecar. + +Open the [MySQL Docker Hub](https://hub.docker.com/_/mysql "mysql dockerhub") page, and you can see explanation of the `MYSQL_ROOT_PASSWORD` variable. On the KubeSphere web console, set the default MySQL password. + +Open the [WordPress Docker Hub](https://hub.docker.com/_/wordpress "wordpress dockerhub") page, and you can see three database variables `WORDPRESS_DB_PASSWORD` `WORDPRESS_DB_USER` `WORDPRESS_DB_HOST`. On the KubeSphere web console, set values of the three variables to connect Wordpress to MySQL. + +## Create a Traditional Application + +First, create a workspace and a project with the gateway and tracing features enabled. + +![00-enable-gateway](/images/blogs/transform-traditional-applications-into-microservices/00-enable-gateway.png) + +Select **Application Workloads** > **Service**. On the **Service** page, click **Create**. On the **Create Service** page, click **Stateful Service** to create a MySQL service. + +![01-create-mysql](/images/blogs/transform-traditional-applications-into-microservices/01-create-mysql.png) + +![02-create-mysql](/images/blogs/transform-traditional-applications-into-microservices/02-create-mysql.png) + +In **Environment Variables**, set the default password. + +![03-set-password](/images/blogs/transform-traditional-applications-into-microservices/03-set-password.png) + +Likewise, create a stateless WordPress service. 
+ +![04-create-wp](/images/blogs/transform-traditional-applications-into-microservices/04-create-wp.png) + +![05-create-wp](/images/blogs/transform-traditional-applications-into-microservices/05-create-wp.png) + +The following variables are for demonstration only. In production, select **Use ConfigMap or Secret**. + +![06-create-wp](/images/blogs/transform-traditional-applications-into-microservices/06-create-wp.png) + +Select **Edit External Access**, and then change the access mode to **NodePort**. + +![07-nodeport](/images/blogs/transform-traditional-applications-into-microservices/07-nodeport.png) + +After the pods run properly, access the service page at `:`. It can be seen that the application runs properly. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078616-495561-image.png) + +Check the pods. It is found that the sidecar is not enabled, and each pod contains only one container. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078689-536270-image.png) + +The traffic topology is not displayed because service mesh is not enabled. The following describes how to enable service mesh. + +## Deploy an Application + +1. Apply the following YAML file to deploy an application. + +```bash +# wordpress.yaml +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + annotations: + kubesphere.io/creator: admin + servicemesh.kubesphere.io/enabled: "true" + labels: + app.kubernetes.io/name: wordpress-app + app.kubernetes.io/version: v1 + name: wordpress-app # The name of the application must be the same as that defined in label app.kubernetes.io/name. 
+spec: + addOwnerRef: true + componentKinds: + - group: "" + kind: Service + - group: apps + kind: Deployment + - group: apps + kind: StatefulSet + - group: extensions + kind: Ingress + - group: servicemesh.kubesphere.io + kind: Strategy + - group: servicemesh.kubesphere.io + kind: ServicePolicy + selector: + matchLabels: + # Tag resources with the following two labels to specify their relationships. + app.kubernetes.io/name: wordpress-app + app.kubernetes.io/version: v1 +``` + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607079099-328486-image.png) + +The application state is 0/0, which indicates that no applications are associated. + +> If the application state is not displayed and running the `kubectl get app` command does not work, it indicates that the CRD of your application is legacy. Run the following command to update the CRD: + +```bash +kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/master/roles/common/files/ks-crds/app.k8s.io_applications.yaml +``` + +2. Add labels for the target application to declare services that belong to the application. + +```bash +kubectl -n sample label deploy/wordpress-v1 app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +kubectl -n sample label svc/wordpress app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 + +kubectl -n sample label sts/mysql-v1 app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +kubectl -n sample label svc/wordpress app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +``` + +Then, check the application, and you can find that the number of services associated with the application is no longer 0. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607087747-296599-image.png) + +3. Add annotations to the target deployment and service. 
+ +```bash +kubectl -n sample annotate svc/wordpress servicemesh.kubesphere.io/enabled="true" +kubectl -n sample annotate deploy/wordpress-v1 servicemesh.kubesphere.io/enabled="true" +kubectl -n sample annotate svc/mysql servicemesh.kubesphere.io/enabled="true" +kubectl -n sample annotate sts/mysql-v1 servicemesh.kubesphere.io/enabled="true" +``` + +4. Add annotations to the deploy and sts templates to enable the sidecar. + +```bash + kubectl -n sample edit deploy/wordpress-v1 +... + template: + metadata: + annotations: + sidecar.istio.io/inject: "true" # Add the row. + +kubectl -n sample edit sts/mysql-v1 +... + template: + metadata: + annotations: + sidecar.istio.io/inject: "true" # Add the row. +``` + +> Note: You can inject the sidecar by simply adding annotations to the template. + +Check whether the sidecar has been injected. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607088879-407091-image.png) + +5. Istio-relevant labels and naming rules must meet the requirements. If you create the service on KubeSphere, you don't need to modify the labels and naming rules. + +For labels relevant to the app version, if you create the service on KubeSphere, labels in the following red boxes are added by default. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607088007-59417-image.png) + +## Check the Transformation Result + +After the transformation is completed, check the application page. + +![08-check-app](/images/blogs/transform-traditional-applications-into-microservices/08-check-app.png) + +Expose the WordPress service. + +![09-expose-port](/images/blogs/transform-traditional-applications-into-microservices/09-expose-port.png) + +Access the service, and it can be found that the application works properly. + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607089124-868165-image.png) + +It can be found that the traffic has been visualized, and the data shows that the traffic flows properly.
+ +![10-traffic-topology](/images/blogs/transform-traditional-applications-into-microservices/10-traffic-topology.png) + +Also, the grayscale release and tracing features function well. + +Note that you need to enable the route feature before using the tracing feature. + +## Summary + +We can successfully transform the application into microservices by performing the previous steps. As the transformation process is tedious, the KubeSphere team will continuously optimize this feature to make transformation easier. \ No newline at end of file diff --git a/content/en/blogs/understand-requests-and-limits-in-kubernetes.md b/content/en/blogs/understand-requests-and-limits-in-kubernetes.md index 3f90cc369..8fdc5c796 100644 --- a/content/en/blogs/understand-requests-and-limits-in-kubernetes.md +++ b/content/en/blogs/understand-requests-and-limits-in-kubernetes.md @@ -90,11 +90,11 @@ As stated above, requests and limits are two important building blocks for clust ### Before You Begin -KubeSphere features a highly functional multi-tenant system for fine-grained access control of different users. In KubeSphere 3.0, you can set requests and limits for namespaces (ResourceQuotas) and containers (LimitRanges) respectively. To perform these operations, you need to create a workspace, a project (i.e. namespace) and an account (`ws-admin`). For more information, see [Create Workspaces, Projects, Accounts and Roles](https://kubesphere.io/docs/quick-start/create-workspace-and-project/). +KubeSphere features a highly functional multi-tenant system for fine-grained access control of different users. In KubeSphere 3.0, you can set requests and limits for namespaces (ResourceQuotas) and containers (LimitRanges) respectively. To perform these operations, you need to create a workspace, a project (i.e. namespace) and a user (`ws-admin`). For more information, see [Create Workspaces, Projects, Users and Roles](https://kubesphere.io/docs/quick-start/create-workspace-and-project/). 
### Set Resource Quotas -1. Go to the **Overview** page of your project, navigate to **Basic Information** in **Project Settings**, and select **Edit Quota** from the **Manage Project** drop-down menu. +1. Go to the **Overview** page of your project, navigate to **Basic Information** in **Project Settings**, and select **Edit Quotas** from the **Manage Project** drop-down menu. ![edit-quota](/images/blogs/en/understand-requests-and-limits-in-kubesphere/edit-quota.png) diff --git a/content/en/blogs/x509-certificate-exporter.md b/content/en/blogs/x509-certificate-exporter.md new file mode 100644 index 000000000..a0c4bfa6c --- /dev/null +++ b/content/en/blogs/x509-certificate-exporter.md @@ -0,0 +1,287 @@ +--- +title: 'Monitoring X.509 Certificates Expiration in Kubernetes Clusters with a Prometheus Exporter' +keywords: x509-certificate-exporter, Prometheus, Kubernetes, Helm, KubeSphere, Certificate Monitoring +description: This article details how to deploy x509-certificate-exporter in Kubernetes and monitor component certificates of a Kubernetes cluster using a custom alerting policy on KubeSphere. +createTime: '2021-11-01' +author: 'Yang Chuansheng, Bettygogo' +snapshot: '/images/blogs/en/x509-certificate-exporter/x509-certificate-exporter-cover-image.png' +--- + +KubeSphere offers a developer-friendly wizard that simplifies the operations & maintenance of Kubernetes, but it is essentially built on Kubernetes. Kubernetes' TLS certificates are valid for only one year, so we need to update the certificates every year, which is unavoidable even though the cluster is installed by the powerful and lightweight installation tool [KubeKey](https://github.com/kubesphere/kubekey). To prevent possible risks arising from certificate expiration, we need to find a way to monitor certificate validity of Kubernetes components. 
+ +Some of you may have heard of [ssl-exporter](https://github.com/ribbybibby/ssl_exporter), which exports metrics for SSL certificates collected from various sources, such as the HTTPS certificate, file certificate, Kubernetes Secret, and kubeconfig file. Basically, ssl-exporter can meet our needs, but it does not have a wealth of metrics. Here, I will share a more powerful Prometheus Exporter: [x509-certificate-exporter](https://github.com/enix/x509-certificate-exporter) with you. + +Unlike ssl-exporter, x509-certificate-exporter only focuses on expiration monitoring of certificates of Kubernetes clusters, such as the file certificates of each component, Kubernetes TLS Secret, and kubeconfig file. Moreover, it provides more metrics. Next, I'll show you how to deploy x509-certificate-exporter on KubeSphere to monitor all certificates of the cluster. + +## Prepare a KubeSphere App Template + +With [OpenPitrix](https://github.com/openpitrix/openpitrix), a multicloud application management platform, [KubeSphere](https://kubesphere.io/) is capable of managing the full lifecycle of apps and allowing you to intuitively deploy and manage apps using the App Store and app templates. For an app that has not been published in the App Store, you can import its Helm chart to the public repository of KubeSphere, or import it to a private app repository to provide an app template. + +Here, we use a KubeSphere app template to deploy x509-certificate-exporter. + +To deploy an app using an app template, you need to create a workspace, a project, and two users (`ws-admin` and `project-regular`), and assign platform role `workspace-admin` in the workspace to `ws-admin`, and role `operator` in the project to `project-regular`. To begin with, let's review the multi-tenant architecture of KubeSphere. 
+ +### Multi-tenant Kubernetes Architecture + +KubeSphere's multi-tenant system is divided into three levels: cluster, workspace, and project (equivalent to [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) of Kubernetes). + +As the system workspace runs system resources, most of which are viewable only, it is suggested that you create a new [workspace](https://kubesphere.com.cn/en/docs/workspace-administration/what-is-workspace/). For security reasons, we strongly recommend you granting different permissions to different tenants when they are collaborating in a workspace. + +You can create multiple workspaces in a KubeSphere cluster. In each workspace, you can create multiple projects. By default, KubeSphere has several built-in roles for each level. Additionally, KubeSphere allows you to create roles with customized permissions. Overall speaking, KubeSphere's multi-tenant architecture is ideal for enterprises and organizations who are yearning for role-based management. + +### Create a User + +After you have installed KubeSphere, you need to create users with different roles so that they can work within the authorized scope. Initially, the system has a default user `admin`, which has been assigned role `platform-admin`. In the following, we will create a user named `user-manager`, which will be used to create new users. + +1. Log in to the KubeSphere web console as user `admin` and the default password is `P@88w0rd`. + +> For account security, it is highly recommended that you change your password the first time you log in to the console. To change your password, click **User Settings** in the drop-down list in the upper-right corner. In **Password Settings**, set a new password. You also can change the language of the console in **User Settings**. + +2. Click **Platform** in the upper-left corner, and then click **Access Control**. 
+
+   ![00-access-control](/images/blogs/en/x509-certificate-exporter/00-access-control.png)
+
+
+
+   In the left navigation pane, click **Platform Roles**, and you will find four available built-in roles. Assign role `users-manager` to the first user you create.
+
+   | Built-in Roles| Description|
+   |----------|----------|
+   | `workspaces-manager`| Workspace manager who can manage all workspaces on the KubeSphere platform.|
+   | `users-manager`| User manager who can manage all users on the KubeSphere platform.|
+   | `platform-regular`| Regular user who has no access to any resources before joining a workspace.|
+   | `platform-admin`| Administrator who can manage all resources on the KubeSphere platform.|
+
+
+3. In **Users**, click **Create**. In the displayed dialog box, provide all the necessary information (marked with *) and select `users-manager` for **Platform Role**.
+
+   ![01-create-user](/images/blogs/en/x509-certificate-exporter/01-create-user.png)
+
+   Click **OK**. In **Users**, you can find the newly created user in the user list.
+
+4. Log out of the console and log back in as user `user-manager` to create another three users listed in the following table.
+
+   | User| Role| Description|
+   |----------|----------|----------|
+   | `ws-manager`| `workspaces-manager`| Creates and manages all workspaces.|
+   | `ws-admin`| `platform-regular`| Manages all resources in a specified workspace (used to invite the `project-regular` user to the workspace).|
+   | `project-regular`| `platform-regular`| Creates workloads, pipelines, and other resources in a specified project.|
+
+
+5. In **Users**, you can view the three users you just created.
+
+   ![02-three-users](/images/blogs/en/x509-certificate-exporter/02-three-users.png)
+
+### Create a Workspace
+
+In this section, you need to use user `ws-manager` created in the previous step to create a workspace.
As a basic logic unit for the management of projects, workload creation, and organization members, workspaces underpin the multi-tenant system of KubeSphere.
+
+1. Log in to KubeSphere as `ws-manager`, who has the permission to manage all workspaces on the platform. Click **Platform** in the upper-left corner and select **Access Control**. In **Workspaces**, you can see there is only one default workspace `system-workspace`, where system-related components and services run. You are not allowed to delete this workspace.
+
+   ![03-ws-manager](/images/blogs/en/x509-certificate-exporter/03-ws-manager.png)
+
+2. Click **Create** on the right, set a name for the new workspace (for example, `demo-workspace`) and set user `ws-admin` as the workspace administrator.
+
+   ![04-create-workspace](/images/blogs/en/x509-certificate-exporter/04-create-workspace.png)
+
+   Click **Create** after you finish.
+
+3. Log out of the console, and log back in as `ws-admin`. In **Workspace Settings**, select **Workspace Members**, and then click **Invite**.
+
+   ![05-invite-member](/images/blogs/en/x509-certificate-exporter/05-invite-member.png)
+
+4. Invite `project-regular` to the workspace, assign it role `workspace-viewer`, and then click **OK**.
+
+   > The actual role name follows a naming convention: \-\. For example, in workspace `demo-workspace`, the actual role name of role `viewer` is `demo-workspace-viewer`.
+
+   ![06-assign-role](/images/blogs/en/x509-certificate-exporter/06-assign-role.png)
+
+5. After you add `project-regular` to the workspace, click **OK**. In **Workspace Members**, you can see two members listed.
+ + | User| Role| Description| + |----------|----------|----------| + | `ws-admin`| `workspace-admin`| Manages all resources under a workspace (Here, it is used to invite new members to the workspace and create a project).| + | `project-regular`| `workspace-viewer`| Creates workloads and other resources in a specified project.| + + + +### Create a Project + +In this section, you need to use the previously created user `ws-admin` to create a project. A project in KubeSphere is the same as a namespace in Kubernetes, which provides virtual isolation for resources. For more information, see [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). + +1. Log in to the KubeSphere web console as `ws-admin`. In **Projects**, click **Create**. + + ![07-create-project](/images/blogs/en/x509-certificate-exporter/07-create-project.png) + +2. Enter a project name (for example, `exporter`) and click **OK**. You can also add an alias and description for the project. + + ![08-enter-project-name](/images/blogs/en/x509-certificate-exporter/08-enter-project-name.png) + +3. In **Projects**, click the project name to view its details. + + ![09-view-project-info](/images/blogs/en/x509-certificate-exporter/09-view-project-info.png) + +4. In **Project Settings**, select **Project Members**, click **Invite** to invite `project-regular` to the project, and assign role `operator` to `project regular`. + + ![10-invite-project-member](/images/blogs/en/x509-certificate-exporter/10-invite-project-member.png) + + ![11-assign-project-role](/images/blogs/en/x509-certificate-exporter/11-assign-project-role.png) + + > Users with role `operator` are project maintainers who can manage resources other than users and roles in the project. + +### Add an App Repository + +1. Log in to the web console of KubeSphere as user `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. 
+ + ![12-add-repo](/images/blogs/en/x509-certificate-exporter/12-add-repo.png) + +2. In the displayed dialog box, specify an app repository name (for example, `enix`) and add your repository URL (for example, `https://charts.enix.io`). Click **Validate** to validate the URL, and then click **OK**. + + ![13-add-repo2](/images/blogs/en/x509-certificate-exporter/13-add-repo2.png) + +3. In **App Repositories**, you can view the created app repository. + + ![14-view-repo](/images/blogs/en/x509-certificate-exporter/14-view-repo.png) + +## Deploy x509-certificate-exporter + +After importing the app repository of x509-certificate-exporter, you can use the app template to deploy x509-certificate-exporter. + +1. Log out of the KubeSphere web console and log in to the console as user `project-regular`. Click the project you created to go to the project page. Go to **Apps** under **Application Workloads**, and click **Create**. + + ![15-create-app](/images/blogs/en/x509-certificate-exporter/15-create-app.png) + +2. In the displayed dialog box, select **From App Template**. + + ![16-create-app2](/images/blogs/en/x509-certificate-exporter/16-create-app2.png) + + **From App Store**: Chooses a built-in app or app uploaded as Helm charts. + + **From App Template**: Chooses an app from a private app repository or the current workspace. + +4. In the drop-down list, select private app repository `enix` you just uploaded. + + ![17-select-enix](/images/blogs/en/x509-certificate-exporter/17-select-enix.png) + +5. Select x509-certificate-exporter for deployment. + + ![18-select-x509](/images/blogs/en/x509-certificate-exporter/18-select-x509.png) + +6. In the drop-down list of **Version**, select an app version, and then click **Deploy**. Meantime, you can view the app information and manifest. + + ![19-deploy-x590](/images/blogs/en/x509-certificate-exporter/19-deploy-x590.png) + +7. Set an app name, confirm the app version and deployment location, and click **Next**. 
+ + ![20-set-app-name](/images/blogs/en/x509-certificate-exporter/20-set-app-name.png) + +8. In **App Settings**, you need to manually edit the manifest and specify the path to the certificate file. + + ![21-app-settings](/images/blogs/en/x509-certificate-exporter/21-app-settings.png) + + ```yaml + daemonSets: + master: + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + watchFiles: + - /var/lib/kubelet/pki/kubelet-client-current.pem + - /etc/kubernetes/pki/apiserver.crt + - /etc/kubernetes/pki/apiserver-kubelet-client.crt + - /etc/kubernetes/pki/ca.crt + - /etc/kubernetes/pki/front-proxy-ca.crt + - /etc/kubernetes/pki/front-proxy-client.crt + watchKubeconfFiles: + - /etc/kubernetes/admin.conf + - /etc/kubernetes/controller-manager.conf + - /etc/kubernetes/scheduler.conf + nodes: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/ingress + operator: Exists + watchFiles: + - /var/lib/kubelet/pki/kubelet-client-current.pem + - /etc/kubernetes/pki/ca.crt + ``` + + Two `DaemonSets` are created, where the master runs on the controller node and the nodes run on the compute node. + + ```bash + $ kubectl -n exporter get ds + + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + x509-x509-certificate-exporter-master 1 1 1 1 1 node-role.kubernetes.io/master= 3d14h + x509-x509-certificate-exporter-nodes 3 3 3 3 3 3d14h + ``` + + Here are how the parameters are defined: + + + **watchFiles:** Specifies the path to the certificate file. + + **watchKubeconfFiles:** Specifies the path to the kubeconfig file. + + ![22-explain-parameters](/images/blogs/en/x509-certificate-exporter/22-explain-parameters.png) + +10. Click **Install** and wait until the app is created successfully and runs. 
+ +![23-view-created-app](/images/blogs/en/x509-certificate-exporter/23-view-created-app.png) + +## Integrate the Monitoring System + +After you deploy the app using the app template, a `ServiceMonitor` will also be created along with two DaemonSets. + +```bash +$ kubectl -n exporter get servicemonitor +NAME AGE +x509-x509-certificate-exporter 3d15h +``` + +Open the web UI of Prometheus, and you can see that the corresponding `Targets` are ready. + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210629142812.png) + +x509-certificate-exporter officially provides a [Grafana Dashboard](https://grafana.com/grafana/dashboards/13922), as shown in the following figure.[](https://grafana.com/grafana/dashboards/13922) + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210629143502.jpg) + +It can be seen that all metrics are crystal clear. Generally, we only need to focus on certificates that have expired and are about to expire. Suppose you want to know validity of a certificate, use the `(x509_cert_not_after{filepath!=""} - time()) / 3600 / 24` expression. + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210629160148.png) + +Additionally, you can create alerting policies so that the O\&M personnel can receive notifications when a certificate is about to expire and update the certificate in time. To create an alerting policy, perform the following steps: + +1. Go to **Alerting Policies** under **Monitoring & Alerting**, and click **Create**. + +![24-monitoring-alerting](/images/blogs/en/x509-certificate-exporter/24-monitoring-alerting.png) + +2. Enter a name for the alerting policy, set the severity, and click **Next**. + +![25-create-alerting-policy](/images/blogs/en/x509-certificate-exporter/25-create-alerting-policy.png) + +3. Click the **Custom Rule** tab, and enter `(x509_cert_not_after{filepath!=""} - time()) / 3600 / 24 < 30` for **Rule Expression**. 
+
+![26-custom-rule](/images/blogs/en/x509-certificate-exporter/26-custom-rule.png)
+
+4. Click **Next**. On the **Message Settings** page, fill in the summary and details of the alert.
+
+![27-message-settings](/images/blogs/en/x509-certificate-exporter/27-message-settings.png)
+
+5. Click **Create**, and the alerting policy is created.
+
+![28-view-created-alerting-rule](/images/blogs/en/x509-certificate-exporter/28-view-created-alerting-rule.png)
+
+## Summary
+
+KubeSphere 3.1 supports built-in alerting policies for certificate expiration. To view the policies, go to **Alerting Policies**, click **Built-in Policies**, and enter `expir` in the search box.
+
+![29-built-in-alerting-policy](/images/blogs/en/x509-certificate-exporter/29-built-in-alerting-policy.png)
+
+Click the alerting policy name to view its rule expression.
+
+![30-view-alerting-rule](/images/blogs/en/x509-certificate-exporter/30-view-alerting-rule.png)
+
+Metrics in the rule expression are exposed by the API Server component, and do not contain certificates of all components of the cluster. To monitor certificates of all components, it is recommended that you create a custom alerting policy on KubeSphere while deploying x509-certificate-exporter. Trust me, you will be hassle-free from certificate expiration.
\ No newline at end of file
diff --git a/content/en/case/ZTO.md b/content/en/case/ZTO.md
new file mode 100644
index 000000000..a4029b860
--- /dev/null
+++ b/content/en/case/ZTO.md
@@ -0,0 +1,113 @@
+---
+title: ZTO Express
+description:
+
+css: scss/case-detail.scss
+
+section1:
+  title: ZTO Express
+  content: Shared by Yang Xiaofei, head of R&D of ZTO Express’s Cloud Platform, this article mainly introduces the development and deployment of KubeSphere on production environment, as well as application scenarios of ZTO Express.
+ +section2: + listLeft: + - title: 'Company Introduction' + contentList: + - content: ZTO is both a key enabler and a direct beneficiary of China’s fast-growing e-commerce market, and has established itself as one of the largest express delivery service providers for millions of online merchants and consumers transacting on leading Chinese e-commerce platforms, such as Alibaba and JD.com. Globally, ZTO provides delivery services in key overseas markets through its business partners as it expands coverage of international express delivery by collaborating with international industry players. + image: '/images/case/ZTO/ZTO1.jpg' + + - title: 'Background' + contentList: + - content: For further development, five prominent challenges were waiting to be addressed. + - content: First, different versions were required to adapt to different environments. However, as multiple versions were carried out, we could not effectively respond to resources through virtual machines. + - content: Second, frequent upgrading called for quick environment initialization, and new versions were proposed frequently even every week. + - content: Third, resource application and environment initialization were over-complex. We used conventional approaches for resource application in 2019, when trouble tickets were required for environment initialization delivery. It was troublesome and low efficient for testers as they needed to apply for resources first and release those resources after testing. + - content: Fourth, low utilization of existing virtual resource was another problem. Staff turnovers and changes in positions sent abundant resources into zombies, especially on development and testing environment. + - content: Fifth, we lacked horizontal extension capacity. Resources were scarce on important shopping days such as “6.18” and “double 11”. To address this problem, we used to prepare resources in advance and take them back after the events. This proved to be outdated. 
+ - content: Confronting all those challenges, we discussed with developers and decided to embark on cloudification. + + - title: 'Cloudification on Production Environment' + contentList: + - content: Our cloudification includes three steps, namely, cloud-based, cloud-ready and cloud-native. + image: /images/case/ZTO/ZTO2.jpg + + - title: + contentList: + - content: Based on Dubbo framework, our micro-service completed transformation in an early date. However, the micro-service was carried out through virtual machine, when the emergence of Salts led to troubles. Therefore, we needed to make transformations on IaaS and container. + image: /images/case/ZTO/ZTO3.jpg + + - title: 'KubeSphere Development and Deployment' + contentList: + - content: We decided to apply KubeSphere as the construction scheme of our container management platform, ZKE, and as an upper container PaaS platform for running micro-services. + image: /images/case/ZTO/ZTO4.jpg + + - title: 'Construction Direction' + contentList: + - content: In line with the reality, we took KubeSphere as the container platform for running our stateless service, Kubernetes observability, and infrastructure resource monitoring, while stateful service like middlewares are provided in Iaas. + image: /images/case/ZTO/ZTO5.jpg + + - title: 'Small Clusters with a Single Tenant' + contentList: + - content: After the selection of KubeSphere, we encountered another problem——Should we choose small clusters with a single tenant or a large cluster with multi-tenants? After consulting the KubeSphere team and evaluating our own demands, we picked up small clusters with single tenant. In accordance with business scenarios (such as middle desk business, and scanning business) and resource applications (such as big data, edge), we created different clusters. + - content: Based on multi-cluster design, we made cloud transformation in line with KubeSphere v2.0. 
Each cluster on development, testing and production environment was deployed with a set of KubeSphere, while public components are drawn out, such as monitor and log.
+
+  - title: 'Secondary Development Based on KubeSphere'
+    contentList:
+      - content: For realizing some customized features to meet our demand, we integrated our business scenarios to KubeSphere. Here is the integration that took place between the summer of 2019 and October of 2020.
+
+  - title: 'Super-Resolution'
+    contentList:
+      - content: We applied super-resolution. Hence, once the limit is set, requests could be quickly computed and integrated. On production environment, the super-resolution ratio for CPU is 10 and memory 1.5.
+
+  - title: 'CPU Cluster Monitoring'
+    contentList:
+      - content: In this part, we merely applied CPU cluster monitoring for demonstrating the data we monitored.
+
+  - title: 'HPA Horizontal Scaling'
+    contentList:
+      - content: We held high expectation in HPA Horizontal Scaling. As KubeSphere resource allocation supports horizontal scaling, we set the horizontal scaling independently and integrated it with super-resolution, thus to facilitate the measurement of the super-resolution ratio.
+      - content: Based on HPA and clear interface of KubeSphere, we have almost been free from operation and maintenance of some core businesses. In addition, demand in emergency scenarios can be quickly responded. For example, when it comes to upstream consumption backlogs, we can quickly increase replication and give an instant response.
+
+  - title: 'Batch Restart'
+    contentList:
+      - content: As abundant deployments might be restarted under extreme conditions, we set an exclusive module in particular. Hence, what we need is only one click to get instant restart and quick response of clusters or deployments under one Namespace.
+ + - title: 'Affinity of Container' + contentList: + - content: In terms of affinity of container, we applied the soft anti-affinity, as some applications found their resource usage mutually exclusive. In addition, we also added some features and affinity settings in this part. + + - title: 'Scheduling Strategy' + contentList: + - content: In terms of scheduling strategy, the features of specifying a host group and exclusive host stood out. As some of our businesses needed to access to the internet port, we put all those businesses within one host group and provided it with access to the internet. We also applied exclusive host to run big data applications in the early hours of morning, because the service was idle at that moment. + + - title: 'Gateway' + contentList: + - content: Each Namespace in KubeSphere held an independent gateway. Independent gateway met our production requirement, while we also needed pan-gateway in development and testing, thus to achieve quicker responses to servers. Hence, we set both pan-gateway and independent gateway, and had access to all development and testing through pan-domain name. After configuration, our services could be directly accessed through KubeSphere interface. + + - title: 'Log Collection' + contentList: + - content: We used to apply Fluent-Bit for log collection, while since there were some mistakes made in resources upgrading or parameters, it always failed as businesses kept increasing. Therefore, we turned to Sidecar. Services based on Java all set an independent Sidecar, and pushed logs to centers like ElasticSearch through Logkit, a small agent. We continued to use Fluent-agent to collect logs under development and testing environment, while for production scenarios that require complete logs, we took further steps to ensure that logs were persistently stored at disks. 
All logs of containers were collected through four approaches, including console log, Fluent-agent console log, /data Sidecar-logkit and /data NFS.
+
+  - title: 'Event Tracing'
+    contentList:
+      - content: In terms of Event Tracing, we made transformation on the basis of Kube-eventer, and added event tracing to KubeSphere, where configured information could be sent to Ding Talk. As for changes in businesses that were highly concerned under production environment, we could send them to work group of Ding Talk through customized configuration.
+
+  - title: 'Future Planning'
+    contentList:
+      - content: In the future, we would like to make some improvements in several aspects. First of all, service plate will ensure that all individuals, including operators, maintainers as well as developers, can understand the framework of the services provided, the middlewares and databases relied on, as well as the running status. Second, it is expected that status quo of all PODS, including changes in color and resources allocation can be seen from the perspective of the whole cluster. Third, we hope that edge computing can be applied for uploading scanned statistics of transferred expresses, automatic recognition of violating practices of operators, the wisdom park project and other purposes.
+      - content: In addition, we also encounter some difficulties in the management of abundant edge nodes, stability and high availability of KubeEdge, and deployment and automatic operation and maintenance of edge nodes. We are exploring more uncharted areas with the pursuit of breakthroughs.
+ + rightPart: + icon: /images/case/ZTO/ZTO6.jpg + list: + - title: INDUSTRY + content: Delivery + - title: LOCATION + content: China + - title: CLOUD TYPE + content: On-premises + - title: CHALLENGES + content: Multi-clusters, HA, Microservice Migration, Unifying Container and VM Networking + - title: ADOPTED FEATURES + content: HPA, DevOps, Grayscale Release, Monitoring and Alerting + +--- diff --git a/content/en/case/_index.md b/content/en/case/_index.md index 7c5e10f00..abfc0fc42 100644 --- a/content/en/case/_index.md +++ b/content/en/case/_index.md @@ -35,6 +35,10 @@ section2: - icon: "images/case/vng.jpg" content: "VNG has seen 14 years of continuous development and expansion to become one of the leading IT companies in Vietnam and Southeast Asia." link: "vng/" + + - icon: "images/case/ZTO/ZTO6.jpg" + content: "ZTO is both a key enabler and a direct beneficiary of China’s fast-growing e-commerce market, and has established itself as one of the largest express delivery service providers for millions of online merchants and consumers transacting on leading Chinese e-commerce platforms, such as Alibaba and JD.com." 
+ link: "ZTO/" section3: title: 'Various Industries are Powered by KubeSphere' diff --git a/content/en/common/kubernetes-versions.md b/content/en/common/kubernetes-versions.md index 0359ad8c5..3aa67d348 100644 --- a/content/en/common/kubernetes-versions.md +++ b/content/en/common/kubernetes-versions.md @@ -5,6 +5,6 @@ _build: | Installation Tool | KubeSphere version | Supported Kubernetes versions | | ----------------- | ------------------ | ------------------------------------------------------------ | -| KubeKey | v3.1.1 | v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9, v1.18.3, v1.18.5, v1.18.6, v1.18.8, v1.19.0, v1.19.8, v1.19.9, v1.20.4, v1.20.6 | -| ks-installer | v3.1.1 | v1.17.x, v1.18.x, v1.19.x, v1.20.x | +| KubeKey | 3.2.0 | v1.19.x, v1.20.x, v1.21.x, v1.22.x (experimental) | +| ks-installer | 3.2.0 | v1.19.x, v1.20.x, v1.21.x, v1.22.x (experimental) | diff --git a/content/en/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md b/content/en/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md index a162bb26f..86d383759 100644 --- a/content/en/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md +++ b/content/en/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md @@ -1,54 +1,62 @@ --- -title: "OIDC identity provider" +title: "OIDC Identity Provider" keywords: "OIDC, identity provider" -description: "How to configure authentication" +description: "How to use an external OIDC identity provider." -linkTitle: "OIDC identity provider" +linkTitle: "OIDC Identity Provider" weight: 12221 --- ## OIDC Identity Provider -[OpenID Connect](https://openid.net/connect/) is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. It uses straightforward REST/JSON message flows with a design goal of “making simple things simple and complicated things possible”. 
It’s uniquely easy for developers to integrate, compared to any preceding Identity protocol, such as Keycloak, Okta, Dex, Auth0, Gluu, and many more. +[OpenID Connect](https://openid.net/connect/) is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. It uses straightforward REST/JSON message flows with a design goal of “making simple things simple and complicated things possible”. It’s uniquely easy for developers to integrate, compared to any preceding Identity protocol, such as Keycloak, Okta, Dex, Auth0, Gluu, Casdoor and many more. +## Prerequisites +You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/installing-on-linux/) and [Installing on Kubernetes](/docs/installing-on-kubernetes/). -*Example of using [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect)*: +## Procedure -```yaml -apiVersion: v1 -data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - jwtSecret: "********" - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: google - type: OIDCIdentityProvider - mappingMethod: auto - provider: - clientID: '********' - clientSecret: '********' - issuer: https://accounts.google.com - redirectURL: 'https://ks-console/oauth/redirect/google' -kind: ConfigMap -name: kubesphere-config -namespace: kubesphere-system -``` +1. Log in to KubeSphere as `admin`, move the cursor to in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: -For the above example: + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. Add the following fields under `spec.authentication.jwtSecret`. 
+ + *Example of using [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect)*: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: google + type: OIDCIdentityProvider + mappingMethod: auto + provider: + clientID: '********' + clientSecret: '********' + issuer: https://accounts.google.com + redirectURL: 'https://ks-console/oauth/redirect/google' + ``` + + See description of parameters as below: + + | Parameter | Description | + | -------------------- | ------------------------------------------------------------ | + | clientID | The OAuth2 client ID. | + | clientSecret | The OAuth2 client secret. | + | redirectURL | The redirected URL to ks-console in the following format: `https:///oauth/redirect/`. The `` in the URL corresponds to the value of `oauthOptions:identityProviders:name`. | + | issuer | Defines how Clients dynamically discover information about OpenID Providers. | + | preferredUsernameKey | Configurable key which contains the preferred username claims. This parameter is optional. | + | emailKey | Configurable key which contains the email claims. This parameter is optional. | + | getUserInfo | GetUserInfo uses the userinfo endpoint to get additional claims for the token. This is especially useful where upstreams return "thin" ID tokens. This parameter is optional. | + | insecureSkipVerify | Used to turn off TLS certificate verification. | -| Parameter | Description | -| ----------| ----------- | -| clientID | The OAuth2 client ID. | -| clientSecret | The OAuth2 client secret. | -| redirectURL | The redirected URL to ks-console. | -| issuer | Defines how Clients dynamically discover information about OpenID Providers. | -| preferredUsernameKey | Configurable key which contains the preferred username claims. 
| -| emailKey | Configurable key which contains the email claims. | -| getUserInfo | GetUserInfo uses the userinfo endpoint to get additional claims for the token. This is especially useful where upstreams return "thin" id tokens. | -| insecureSkipVerify | Used to turn off TLS certificate verify. | \ No newline at end of file diff --git a/content/en/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md b/content/en/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md index 6b50e7ac3..f95aaec38 100644 --- a/content/en/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md +++ b/content/en/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md @@ -9,7 +9,7 @@ weight: 12210 This document describes how to use an external identity provider such as an LDAP service or Active Directory service on KubeSphere. -KubeSphere provides a built-in OAuth server. Users can obtain OAuth access tokens to authenticate themselves to the KubeSphere API. As a KubeSphere administrator, you can edit the `kubesphere-config` ConfigMap to configure OAuth and specify identity providers. +KubeSphere provides a built-in OAuth server. Users can obtain OAuth access tokens to authenticate themselves to the KubeSphere API. As a KubeSphere administrator, you can edit `ks-installer` of the CRD `ClusterConfiguration` to configure OAuth and specify identity providers. ## Prerequisites @@ -18,57 +18,49 @@ You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. F ## Procedure -1. Log in to KubeSphere as `admin`, move the cursor to in the bottom-right corner, click **Kubectl**, and run the following command to edit the `kubesphere-config` ConfigMap: +1. 
Log in to KubeSphere as `admin`, move the cursor to in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: ```bash - kubectl -n kubesphere-system edit cm kubesphere-config + kubectl -n kubesphere-system edit cc ks-installer ``` -2. Configure fields in the `data:kubesphere.yaml:authentication` section. +2. Add the following fields under `spec.authentication.jwtSecret`. Example: ```yaml - apiVersion: v1 - data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - loginHistoryRetentionPeriod: 168h - maximumClockSkew: 10s - multipleLogin: true - jwtSecret: "********" - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: ldap - type: LDAPIdentityProvider - mappingMethod: auto - provider: - host: 192.168.0.2:389 - managerDN: uid=root,cn=users,dc=nas - managerPassword: ******** - userSearchBase: cn=users,dc=nas - loginAttribute: uid - mailAttribute: mail + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + loginHistoryRetentionPeriod: 168h + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail ``` - + The fields are described as follows: - * `authenticateRateLimiterMaxTries`: Maximum number of consecutive login failures allowed during a period specified by `authenticateRateLimiterDuration`. If the number of consecutive login failures of a user reaches the limit, the user will be blocked. 
- - * `authenticateRateLimiterDuration`: Period during which `authenticateRateLimiterMaxTries` applies. - - * `loginHistoryRetentionPeriod`: Retention period of login records. Outdated login records are automatically deleted. - - * `maximumClockSkew`: Maximum clock skew for time-sensitive operations such as token expiration validation. The default value is `10s`. - - * `multipleLogin`: Whether multiple users are allowed to log in from different locations. The default value is `true`. - * `jwtSecret`: Secret used to sign user tokens. In a multi-cluster environment, all clusters must [use the same Secret](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster). - + * `authenticateRateLimiterMaxTries`: Maximum number of consecutive login failures allowed during a period specified by `authenticateRateLimiterDuration`. If the number of consecutive login failures of a user reaches the limit, the user will be blocked. + * `authenticateRateLimiterDuration`: Period during which `authenticateRateLimiterMaxTries` applies. + * `loginHistoryRetentionPeriod`: Retention period of login records. Outdated login records are automatically deleted. + * `maximumClockSkew`: Maximum clock skew for time-sensitive operations such as token expiration validation. The default value is `10s`. + * `multipleLogin`: Whether multiple users are allowed to log in from different locations. The default value is `true`. * `oauthOptions`: OAuth settings. * `accessTokenMaxAge`: Access token lifetime. For member clusters in a multi-cluster environment, the default value is `0h`, which means access tokens never expire. For other clusters, the default value is `2h`. * `accessTokenInactivityTimeout`: Access token inactivity timeout period. An access token becomes invalid after it is idle for a period specified by this field. After an access token times out, the user needs to obtain a new access token to regain access. 
@@ -76,10 +68,10 @@ You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. F * `name`: Identity provider name. * `type`: Identity provider type. * `mappingMethod`: Account mapping method. The value can be `auto` or `lookup`. - * If the value is `auto` (default), you need to specify a new username. KubeSphere automatically creates a user according to the username and maps the user to a third-party account. + * If the value is `auto` (default), you need to specify a new username. KubeSphere automatically creates a user according to the username and maps the user to a third-party account. * If the value is `lookup`, you need to perform step 3 to manually map an existing KubeSphere user to a third-party account. * `provider`: Identity provider information. Fields in this section vary according to the identity provider type. - + 3. If `mappingMethod` is set to `lookup`, run the following command and add the labels to map a KubeSphere user to a third-party account. Skip this step if `mappingMethod` is set to `auto`. ```bash @@ -92,17 +84,13 @@ You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. F iam.kubesphere.io/origin-uid: ``` -4. After the fields are configured, run the following command to restart ks-apiserver. +4. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. - ```bash - kubectl -n kubesphere-system rollout restart deploy/ks-apiserver - ``` - -{{< notice note >}} - -In a multi-cluster environment, you only need to configure the Host Cluster. - -{{}} + {{< notice note >}} + + In a multi-cluster environment, you only need to configure the host cluster. 
+ + {{}} ## Identity provider @@ -111,14 +99,14 @@ You can configure multiple identity providers (IdPs) in the 'identityProviders' Kubesphere provides the following types of identity providers by default: -* [LDAPIdentityProvider](../use-an-ldap-service) +* [LDAP Identity Provider](../use-an-ldap-service) -* [OIDCIdentityProvider](../oidc-identity-provider) +* [OIDC Identity Provider](../oidc-identity-provider) -* [GitHubIdentityProvider]() +* [GitHub Identity Provider]() -* [CASIdentityProvider]() +* [CAS Identity Provider]() -* [AliyunIDaaSProvider]() +* [Aliyun IDaaS Provider]() You can also expand the kubesphere [OAuth2 authentication plug-in](../use-an-oauth2-identity-provider) to integrate with your account system. diff --git a/content/en/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md b/content/en/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md index 276fc7b17..d2d2eb67c 100644 --- a/content/en/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md +++ b/content/en/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md @@ -14,44 +14,39 @@ This document describes how to use an LDAP service as an external identity provi * You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/installing-on-linux/) and [Installing on Kubernetes](/docs/installing-on-kubernetes/). * You need to obtain the manager distinguished name (DN) and manager password of an LDAP service. -### Procedure +## Procedure -1. Log in to KubeSphere as `admin`, move the cursor to in the bottom-right corner, click **Kubectl**, and run the following command to edit the `kubesphere-config` ConfigMap: +1. 
Log in to KubeSphere as `admin`, move the cursor to in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: ```bash - kubectl -n kubesphere-system edit cm kubesphere-config + kubectl -n kubesphere-system edit cc ks-installer ``` Example: ```yaml - apiVersion: v1 - data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - loginHistoryRetentionPeriod: 168h - maximumClockSkew: 10s - multipleLogin: true - jwtSecret: "********" - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: LDAP - type: LDAPIdentityProvider - mappingMethod: auto - provider: - host: 192.168.0.2:389 - managerDN: uid=root,cn=users,dc=nas - managerPassword: ******** - userSearchBase: cn=users,dc=nas - loginAttribute: uid - mailAttribute: mail + spec: + authentication: + jwtSecret: '' + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail ``` - -2. Configure fields other than `oauthOptions:identityProviders` in the `data:kubesphere.yaml:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). + +2. Configure fields other than `oauthOptions:identityProviders` in the `spec:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). 3. Configure fields in `oauthOptions:identityProviders` section. @@ -80,19 +75,27 @@ This document describes how to use an LDAP service as an external identity provi iam.kubesphere.io/origin-uid: ``` -5. 
After the fields are configured, run the following command to restart ks-apiserver. +5. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. + + {{< notice note >}} + + The KubeSphere web console is unavailable during the restart of ks-installer. Please wait until the restart is complete. + + {{}} + +6. If you are using KubeSphere 3.2.0, run the following command after configuring LDAP and wait until `ks-installer` is up and running: ```bash - kubectl -n kubesphere-system rollout restart deploy/ks-apiserver + kubectl -n kubesphere-system set image deployment/ks-apiserver *=kubesphere/ks-apiserver:v3.2.1 ``` {{< notice note >}} - The KubeSphere web console is unavailable during the restart of ks-apiserver. Please wait until the restart is complete. + If you are using KubeSphere 3.2.1, skip this step. {{}} -6. Go to the KubeSphere login page and enter the username and password of an LDAP user to log in. +7. Go to the KubeSphere login page and enter the username and password of an LDAP user to log in. {{< notice note >}} diff --git a/content/en/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md b/content/en/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md index c9fada5db..0ea1f959d 100644 --- a/content/en/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md +++ b/content/en/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md @@ -81,57 +81,49 @@ KubeSphere provides two built-in OAuth 2.0 plugins: [GitHubIdentityProvider](htt ## Integrate an Identity Provider with KubeSphere -1. Log in to KubeSphere as `admin`, move the cursor to in the bottom-right corner, click **Kubectl**, and run the following command to edit the `kubesphere-config` ConfigMap: +1. 
Log in to KubeSphere as `admin`, move the cursor to the hammer icon in the lower-right corner, click **kubectl**, and run the following command to edit `ks-installer` of the CRD `ClusterConfiguration`: ```bash - kubectl -n kubesphere-system edit cm kubesphere-config + kubectl -n kubesphere-system edit cc ks-installer ``` -2. Configure fields other than `oauthOptions:identityProviders` in the `data:kubesphere.yaml:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). +2. Configure fields other than `oauthOptions:identityProviders` in the `spec:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). 3. Configure fields in `oauthOptions:identityProviders` section according to the identity provider plugin you have developed. The following is a configuration example that uses GitHub as an external identity provider. For details, see the [official GitHub documentation](https://docs.github.com/en/developers/apps/building-oauth-apps) and the [source code of the GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin. 
```yaml - apiVersion: v1 - data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - jwtSecret: '******' - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: github - type: GitHubIdentityProvider - mappingMethod: auto - provider: - clientID: '******' - clientSecret: '******' - redirectURL: 'https://ks-console/oauth/redirect/github' + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: github + type: GitHubIdentityProvider + mappingMethod: auto + provider: + clientID: '******' + clientSecret: '******' + redirectURL: 'https://ks-console/oauth/redirect/github' ``` - + Similarly, you can also use Alibaba Cloud IDaaS as an external identity provider. For details, see the official [Alibaba IDaaS documentation](https://www.alibabacloud.com/help/product/111120.htm?spm=a3c0i.14898238.2766395700.1.62081da1NlxYV0) and the [source code of the AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin. -4. After the `kubesphere-config` ConfigMap is modified, run the following command to restart ks-apiserver. - - ```bash - kubectl -n kubesphere-system rollout restart deploy/ks-apiserver - ``` +4. After the fields are configured, save your changes, and wait until the restart of ks-installer is complete. {{< notice note >}} - - The KubeSphere web console is unavailable during the restart of ks-apiserver. Please wait until the restart is complete. + + The KubeSphere web console is unavailable during the restart of ks-installer. Please wait until the restart is complete. {{}} 5. Go to the KubeSphere login page, click **Log In with XXX** (for example, **Log In with GitHub**). 
- ![github-login-page](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/kubesphere-login-page.png) - 6. On the login page of the external identity provider, enter the username and password of a user configured at the identity provider to log in to KubeSphere. ![github-login-page](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/github-login-page.png) diff --git a/content/en/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md b/content/en/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md index 5e3799d7c..14bae1b80 100644 --- a/content/en/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md +++ b/content/en/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md @@ -1,6 +1,6 @@ --- title: "Kubernetes Multi-tenancy in KubeSphere" -keywords: "Kubernetes, Kubesphere, multi-tenancy" +keywords: "Kubernetes, KubeSphere, multi-tenancy" description: "Understand the multi-tenant architecture in KubeSphere." linkTitle: "Multi-tenancy in KubeSphere" weight: 12100 @@ -36,8 +36,6 @@ Multi-level access control and resource quota limits underlie resource isolation Similar to Kubernetes, KubeSphere uses RBAC to manage permissions granted to users, thus logically implementing resource isolation. -![rbac](/images/docs/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png) - The access control in KubeSphere is divided into three levels: platform, workspace and project. You use roles to control what permissions users have at different levels for different resources. 1. [Platform roles](/docs/quick-start/create-workspace-and-project/): Control what permissions platform users have for platform resources, such as clusters, workspaces and platform members. 
diff --git a/content/en/docs/application-store/app-lifecycle-management.md b/content/en/docs/application-store/app-lifecycle-management.md index 32989038d..c46ec1920 100644 --- a/content/en/docs/application-store/app-lifecycle-management.md +++ b/content/en/docs/application-store/app-lifecycle-management.md @@ -8,93 +8,67 @@ weight: 14100 KubeSphere integrates [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source multi-cloud application management platform, to set up the App Store, managing Kubernetes applications throughout their entire lifecycle. The App Store supports two kinds of application deployment: -- **App templates** provide a way for developers and independent software vendors (ISVs) to share applications with users in a workspace. You can also import third-party app repositories within a workspace. -- **Composing apps** help users quickly build a complete application using multiple microservices to compose it. KubeSphere allows users to select existing services or create new services to create a composing app on the one-stop console. - -![app-store](/images/docs/appstore/application-lifecycle-management/app-store.png) +- **Template-Based Apps** provide a way for developers and independent software vendors (ISVs) to share applications with users in a workspace. You can also import third-party app repositories within a workspace. +- **Composed Apps** help users quickly build a complete application using multiple microservices to compose it. KubeSphere allows users to select existing services or create new services to create a composed app on the one-stop console. Using [Redis](https://redis.io/) as an example application, this tutorial demonstrates how to manage the Kubernetes app throughout the entire lifecycle, including submission, review, test, release, upgrade and removal. ## Prerequisites - You need to enable the [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/). 
-- You need to create a workspace, a project and an account (`project-regular`). For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Hands-on Lab -### Step 1: Create a customized role and account +### Step 1: Create a customized role and two users -You need to create two accounts first, one for ISVs (`isv`) and the other (`reviewer`) for app technical reviewers. +You need to create two users first, one for ISVs (`isv`) and the other (`app-reviewer`) for app technical reviewers. -1. Log in to the KubeSphere console with the account `admin`. Click **Platform** in the top-left corner and select **Access Control**. In **Account Roles**, click **Create**. - - ![create-role](/images/docs/appstore/application-lifecycle-management/create-role.png) +1. Log in to the KubeSphere console with the user `admin`. Click **Platform** in the upper-left corner and select **Access Control**. In **Platform Roles**, click **Create**. 2. Set a name for the role, such as `app-review`, and click **Edit Permissions**. - ![app-review-name](/images/docs/appstore/application-lifecycle-management/app-review-name.png) - -3. In **App Management**, choose **App Template Management** and **App Template Viewing** in the permission list, then click **OK**. - - ![create-roles](/images/docs/appstore/application-lifecycle-management/create-roles.png) +3. In **App Management**, choose **App Template Management** and **App Template Viewing** in the permission list, and then click **OK**. {{< notice note >}} - The account granted the role `app-review` is able to view the App Store on the platform and manage apps, including review and removal. 
+ The user who is granted the role `app-review` has the permission to view the App Store on the platform and manage apps, including review and removal. {{}} -4. As the role is ready now, you need to create an account and grant the role of `app-review` to it. In **Accounts**, click **Create**. Provide the required information and click **OK**. +4. As the role is ready now, you need to create a user and grant the role `app-review` to it. In **Users**, click **Create**. Provide the required information and click **OK**. - ![create-review-role](/images/docs/appstore/application-lifecycle-management/create-review-role.png) +5. Similarly, create another user `isv`, and grant the role of `platform-regular` to it. -5. Similarly, create another account `isv`, and grant the role of `platform-regular` to it. - - ![account-ready](/images/docs/appstore/application-lifecycle-management/account-ready.png) - -6. Invite both accounts created above to an existing workspace such as `demo-workspace`, and grant them the role of `workspace-admin`. +6. Invite both users created above to an existing workspace such as `demo-workspace`, and grant them the role of `workspace-admin`. ### Step 2: Upload and submit an application 1. Log in to KubeSphere as `isv` and go to your workspace. You need to upload the example app Redis to this workspace so that it can be used later. First, download the app [Redis 11.3.4](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-11.3.4.tgz) and click **Upload Template** in **App Templates**. - ![upload-app](/images/docs/appstore/application-lifecycle-management/upload-app.png) - {{< notice note >}} In this example, a new version of Redis will be uploaded later to demonstrate the upgrade feature. {{}} -2. In the dialog that appears, click **Upload Helm Chart Package** to upload the chart file. Click **OK** to continue. +2. In the dialog that appears, click **Upload Helm Chart** to upload the chart file. 
Click **OK** to continue. - ![upload-template](/images/docs/appstore/application-lifecycle-management/upload-template.png) - -3. Basic information of the app displays under **App Information**. To upload an icon for the app, click **Upload icon**. You can also skip it and click **OK** directly. +3. Basic information of the app displays under **App Information**. To upload an icon for the app, click **Upload Icon**. You can also skip it and click **OK** directly. {{< notice note >}} - Maximum accepted resolutions of the app icon: 96 x 96 pixels. + The maximum accepted resolution of the app icon is 96 x 96 pixels. {{}} - ![upload-icon](/images/docs/appstore/application-lifecycle-management/upload-icon.png) +4. The app displays in the template list with the status **Developing** after it is successfully uploaded, which means this app is under development. The uploaded app is visible to all members in the same workspace. -4. The app displays in the template list with the status **Draft** after successfully uploaded, which means this app is under development. The uploaded app is visible to all members in the same workspace. - - ![app-draft](/images/docs/appstore/application-lifecycle-management/app-draft.png) - -5. Go to the detail page of the app template by clicking Redis from the list. You can edit the basic information of this app by clicking **Edit Information**. - - ![edit-app-template](/images/docs/appstore/application-lifecycle-management/edit-app-template.png) +5. Go to the detail page of the app template by clicking Redis from the list. You can edit the basic information of this app by clicking **Edit**. 6. You can customize the app's basic information by specifying the fields in the pop-up window. - ![edit-app-information](/images/docs/appstore/application-lifecycle-management/edit-app-information.png) - -7. Click **OK** to save your changes, then you can test this application by deploying it to Kubernetes. 
Click the draft version to expand the menu and select **Test Deployment**. - - ![test-deployment](/images/docs/appstore/application-lifecycle-management/test-deployment.png) +7. Click **OK** to save your changes, then you can test this application by deploying it to Kubernetes. Click the draft version to expand the menu and click **Install**. {{< notice note >}} @@ -102,11 +76,7 @@ You need to create two accounts first, one for ISVs (`isv`) and the other (`revi {{}} -8. Select the cluster and project to which you want to deploy the app, set up different configurations for the app, and then click **Deploy**. - - ![deployment-place](/images/docs/appstore/application-lifecycle-management/deployment-place.png) - - ![deploying-app](/images/docs/appstore/application-lifecycle-management/deploying-app.png) +8. Select the cluster and project to which you want to deploy the app, set up different configurations for the app, and then click **Install**. {{< notice note >}} @@ -114,13 +84,9 @@ You need to create two accounts first, one for ISVs (`isv`) and the other (`revi {{}} -9. Wait for a few minutes, then switch to the tab **Deployed Instances**. You will find that Redis has been deployed successfully. +9. Wait for a few minutes, then switch to the tab **App Instances**. You will find that Redis has been deployed successfully. - ![deployed-instance-success](/images/docs/appstore/application-lifecycle-management/deployed-instance-success.png) - -10. After you test the app with no issues found, you can click **Submit for Review** to submit this application for review. - - ![submit-for-review](/images/docs/appstore/application-lifecycle-management/submit-for-review.png) +10. After you test the app with no issues found, you can click **Submit for Release** to submit this application for release. {{< notice note >}} @@ -128,37 +94,25 @@ The version number must start with a number and contain decimal points. {{}} -11. 
After the app is submitted, the app status will change to **Submitted**. Now app reviewers can review it. +11. After the app is submitted, the app status will change to **Submitted**. Now app reviewers can release it. - ![submitted-app](/images/docs/appstore/application-lifecycle-management/submitted-app.png) +### Step 3: Release the application -### Step 3: Review the application +1. Log out of KubeSphere and log back in as `app-reviewer`. Click **Platform** in the upper-left corner and select **App Store Management**. On the **App Release** page, the app submitted in the previous step displays under the tab **Unreleased**. -1. Log out of KubeSphere and log back in as `reviewer`. Click **Platform** in the top-left corner and select **App Store Management**. On the **App Review** page, the app submitted in the previous step displays under the tab **Unprocessed**. +2. To release this app, click it to inspect the app information, introduction, chart file and update logs from the pop-up window. - ![app-to-be-reviewed](/images/docs/appstore/application-lifecycle-management/app-to-be-reviewed.png) - -2. To review this app, click it to inspect the app information, introduction, chart file and update logs from the pop-up window. - - ![reviewing](/images/docs/appstore/application-lifecycle-management/reviewing.png) - -3. It is the responsibility of the reviewer to decide whether the app meets the criteria to be released to the App Store. Click **Pass** to approve it or **Reject** to deny an app submission. +3. The reviewer needs to decide whether the app meets the release criteria on the App Store. Click **Pass** to approve it or **Reject** to deny an app submission. ### Step 4: Release the application to the App Store After the app is approved, `isv` can release the Redis application to the App Store, allowing all users on the platform to find and deploy this application. -1. Log out of KubeSphere and log back in as `isv`. 
Go to your workspace and click Redis on the **App Templates** page. On its detail page, expand the version menu, then click **Release to Store**. In the pop-up prompt, click **OK** to confirm. +1. Log out of KubeSphere and log back in as `isv`. Go to your workspace and click Redis on the **Template-Based Apps** page. On its details page, expand the version menu, then click **Release to Store**. In the pop-up prompt, click **OK** to confirm. - ![app-templates-page](/images/docs/appstore/application-lifecycle-management/app-templates-page.png) +2. Under **App Release**, you can see the app status. **Activated** means it is available in the App Store. -2. Under **App Review**, you can see the app status. **Active** means it is available in the App Store. - - ![app-active](/images/docs/appstore/application-lifecycle-management/app-active.png) - -3. Click **View in Store** to go to its **App Information** page in the App Store. Alternatively, click **App Store** in the top-left corner and you can also see the app. - - ![redis](/images/docs/appstore/application-lifecycle-management/redis.png) +3. Click **View in Store** to go to its **Versions** page in the App Store. Alternatively, click **App Store** in the upper-left corner, and you can also see the app. {{< notice note >}} @@ -166,27 +120,21 @@ After the app is approved, `isv` can release the Redis application to the App St {{}} -4. Now, users in the workspace can deploy Redis from the App Store. To deploy the app to Kubernetes, click the app to go to its **App Information** page, and click **Deploy**. - - ![deploy-redis](/images/docs/appstore/application-lifecycle-management/deploy-redis.png) +4. Now, users in the workspace can install Redis from the App Store. To install the app to Kubernetes, click the app to go to its **App Information** page, and click **Install**. 
{{< notice note >}} - If you have trouble deploying an application and the **Status** column shows **Failed**, you can hover your cursor over the **Failed** icon to see the error message. + If you have trouble installing an application and the **Status** column shows **Failed**, you can hover your cursor over the **Failed** icon to see the error message. {{}} -### Step 5: Create an app category +### Step 5: Create an application category -`reviewer` can create multiple categories for different types of applications based on their function and usage. It is similar to setting tags and categories can be used in the App Store as filters, such as Big Data, Middleware, and IoT. +`app-reviewer` can create multiple categories for different types of applications based on their function and usage. It is similar to setting tags and categories can be used in the App Store as filters, such as Big Data, Middleware, and IoT. -1. Log in to KubeSphere as `reviewer`. To create a category, go to the **App Store Management** page and click in **App Categories**. +1. Log in to KubeSphere as `app-reviewer`. To create a category, go to the **App Store Management** page and click in **App Categories**. - ![app-category](/images/docs/appstore/application-lifecycle-management/app-category.png) - -2. Set a name and icon for the category in the dialog, then click **OK**. For Redis, you can enter `Database` for the field **Category Name**. - - ![set-app-type](/images/docs/appstore/application-lifecycle-management/set-app-type.png) +2. Set a name and icon for the category in the dialog, then click **OK**. For Redis, you can enter `Database` for the field **Name**. {{< notice note >}} @@ -196,37 +144,23 @@ After the app is approved, `isv` can release the Redis application to the App St 3. As the category is created, you can assign the category to your app. In **Uncategorized**, select Redis and click **Change Category**. 
- ![set-category-for-app](/images/docs/appstore/application-lifecycle-management/set-category-for-app.png) - 4. In the dialog, select the category (**Database**) from the drop-down list and click **OK**. - ![confirm-category](/images/docs/appstore/application-lifecycle-management/confirm-category.jpg) - 5. The app displays in the category as expected. - ![app-in-category-list-expected](/images/docs/appstore/application-lifecycle-management/app-in-category-list-expected.png) - ### Step 6: Add a new version To allow workspace users to upgrade apps, you need to add new app versions to KubeSphere first. Follow the steps below to add a new version for the example app. -1. Log in to KubeSphere as `isv` again and navigate to **App Templates**. Click the app Redis in the list. +1. Log in to KubeSphere as `isv` again and navigate to **Template-Based Apps**. Click the app Redis in the list. 2. Download [Redis 12.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-12.0.0.tgz), which is a new version of Redis for demonstration in this tutorial. On the tab **Versions**, click **New Version** on the right to upload the package you just downloaded. - ![new-version-redis](/images/docs/appstore/application-lifecycle-management/new-version-redis.png) - -3. Click **Upload Helm Chart Package** and click **OK** after it is uploaded. - - ![upload-new-redis-version](/images/docs/appstore/application-lifecycle-management/upload-new-redis-version.png) +3. Click **Upload Helm Chart** and click **OK** after it is uploaded. 4. The new app version displays in the version list. You can click it to expand the menu and test the new version. Besides, you can also submit it for review and release it to the App Store, which is the same as the steps shown above. 
- ![uploaded-new-version](/images/docs/appstore/application-lifecycle-management/uploaded-new-version.png) - - ![see-new-version](/images/docs/appstore/application-lifecycle-management/see-new-version.png) - -### Step 7: Upgrade +### Step 7: Upgrade an application After a new version is released to the App Store, all users can upgrade this application to the new version. @@ -236,18 +170,12 @@ To follow the steps below, you must deploy an app of one of its old versions fir {{}} -1. Log in to KubeSphere as `project-regular`, navigate to the **Apps** page of the project, and click the app to be upgraded. +1. Log in to KubeSphere as `project-regular`, navigate to the **Apps** page of the project, and click the app to upgrade. - ![app-to-be-upgraded](/images/docs/appstore/application-lifecycle-management/app-to-be-upgraded.png) - -2. Click **More** and select **Edit Template** from the drop-down menu. - - ![edit-template](/images/docs/appstore/application-lifecycle-management/edit-template.png) +2. Click **More** and select **Edit Settings** from the drop-down list. 3. In the window that appears, you can see the YAML file of application configurations. Select the new version from the drop-down list on the right. You can customize the YAML file of the new version. In this tutorial, click **Update** to use the default configurations directly. - ![upgrade-app](/images/docs/appstore/application-lifecycle-management/upgrade-app.png) - {{< notice note >}} You can select the same version from the drop-down list on the right as that on the left to customize current application configurations through the YAML file. @@ -256,22 +184,14 @@ To follow the steps below, you must deploy an app of one of its old versions fir 4. On the **Apps** page, you can see that the app is being upgraded. The status will change to **Running** when the upgrade finishes. 
- ![version-upgraded](/images/docs/appstore/application-lifecycle-management/version-upgraded.png) - - ![upgrade-finish](/images/docs/appstore/application-lifecycle-management/upgrade-finish.png) - -### Step 8: Suspend the application +### Step 8: Suspend an application You can choose to remove an app entirely from the App Store or suspend a specific app version. -1. Log in to KubeSphere as `reviewer`. Click **Platform** in the top-left corner and select **App Store Management**. On the **App Store** page, click Redis. - - ![remove-app](/images/docs/appstore/application-lifecycle-management/remove-app.png) +1. Log in to KubeSphere as `app-reviewer`. Click **Platform** in the upper-left corner and select **App Store Management**. On the **App Store** page, click Redis. 2. On the detail page, click **Suspend App** and select **OK** in the dialog to confirm the operation to remove the app from the App Store. - ![suspend-app](/images/docs/appstore/application-lifecycle-management/suspend-app.png) - {{< notice note >}} Removing an app from the App Store does not affect tenants who are using the app. @@ -280,12 +200,8 @@ You can choose to remove an app entirely from the App Store or suspend a specifi 3. To make the app available in the App Store again, click **Activate App**. - ![activate-app](/images/docs/appstore/application-lifecycle-management/activate-app.png) - 4. To suspend a specific app version, expand the version menu and click **Suspend Version**. In the dialog that appears, click **OK** to confirm. - ![suspend-version](/images/docs/appstore/application-lifecycle-management/suspend-version.png) - {{< notice note >}} After an app version is suspended, this version is not available in the App Store. Suspending an app version does not affect tenants who are using this version. @@ -294,8 +210,6 @@ You can choose to remove an app entirely from the App Store or suspend a specifi 5. To make the app version available in the App Store again, click **Activate Version**. 
- ![activate-version](/images/docs/appstore/application-lifecycle-management/activate-version.png) - diff --git a/content/en/docs/application-store/built-in-apps/etcd-app.md b/content/en/docs/application-store/built-in-apps/etcd-app.md index 385852c47..61854d090 100644 --- a/content/en/docs/application-store/built-in-apps/etcd-app.md +++ b/content/en/docs/application-store/built-in-apps/etcd-app.md @@ -13,29 +13,19 @@ This tutorial walks you through an example of deploying etcd from the App Store ## Prerequisites - Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy etcd from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![project-overview](/images/docs/appstore/built-in-apps/etcd-app/project-overview.png) - -2. 
Find etcd and click **Deploy** on the **App Information** page. - - ![etcd-app-store](/images/docs/appstore/built-in-apps/etcd-app/etcd-app-store.png) - - ![deploy-etcd](/images/docs/appstore/built-in-apps/etcd-app/deploy-etcd.png) +2. Find etcd and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure etcd is deployed in `demo-project` and click **Next**. - ![deployment-location](/images/docs/appstore/built-in-apps/etcd-app/deployment-location.png) - -4. On the **App Configurations** page, specify the size of the persistent volume for etcd and click **Deploy**. - - ![specify-volume](/images/docs/appstore/built-in-apps/etcd-app/specify-volume.png) +4. On the **App Settings** page, specify the size of the persistent volume for etcd and click **Install**. {{< notice note >}} @@ -43,22 +33,16 @@ This tutorial walks you through an example of deploying etcd from the App Store {{}} -5. In **App Templates** of the **Apps** page, wait until etcd is up and running. +5. In **Template-Based Apps** of the **Apps** page, wait until etcd is up and running. - ![etcd-running](/images/docs/appstore/built-in-apps/etcd-app/etcd-running.png) +### Step 2: Access the etcd service -### Step 2: Access the etcd Service - -After the app is deployed, you can use etcdctl, a command-line tool for interacting with etcd server, to access etcd on the KubeSphere console directly. +After the app is deployed, you can use etcdctl, a command-line tool for interacting with the etcd server, to access etcd on the KubeSphere console directly. 1. Navigate to **StatefulSets** in **Workloads**, and click the service name of etcd. - ![etcd-statefulset](/images/docs/appstore/built-in-apps/etcd-app/etcd-statefulset.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![etcd-terminal](/images/docs/appstore/built-in-apps/etcd-app/etcd-terminal.png) - 3. In the terminal, you can read and write data directly. 
For example, execute the following two commands respectively. ```bash @@ -69,8 +53,6 @@ After the app is deployed, you can use etcdctl, a command-line tool for interact etcdctl get /name ``` - ![etcd-command](/images/docs/appstore/built-in-apps/etcd-app/etcd-command.png) - 4. For clients within the KubeSphere cluster, the etcd service can be accessed through `..svc.:2379` (for example, `etcd-bqe0g4.demo-project.svc.cluster.local:2379` in this guide). 5. For more information, see [the official documentation of etcd](https://etcd.io/docs/v3.4.0/). \ No newline at end of file diff --git a/content/en/docs/application-store/built-in-apps/harbor-app.md b/content/en/docs/application-store/built-in-apps/harbor-app.md index 7cfab45ab..ed09ee7a1 100644 --- a/content/en/docs/application-store/built-in-apps/harbor-app.md +++ b/content/en/docs/application-store/built-in-apps/harbor-app.md @@ -12,27 +12,19 @@ This tutorial walks you through an example of deploying [Harbor](https://goharbo ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy Harbor from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![app-store](/images/docs/appstore/built-in-apps/harbor-app/app-store.png) - -2. Find Harbor and click **Deploy** on the **App Information** page. - - ![find-harbor](/images/docs/appstore/built-in-apps/harbor-app/find-harbor.png) - - ![click-deploy](/images/docs/appstore/built-in-apps/harbor-app/click-deploy.png) +2. Find Harbor and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure Harbor is deployed in `demo-project` and click **Next**. - ![deploy-harbor](/images/docs/appstore/built-in-apps/harbor-app/deploy-harbor.png) - -4. On the **App Configurations** page, edit the configuration file of Harbor. Pay attention to the following fields. +4. On the **App Settings** page, edit the configuration file of Harbor. Pay attention to the following fields. `type`: The method you use to access the Harbor Service. This example uses `nodePort`. @@ -40,8 +32,6 @@ This tutorial walks you through an example of deploying [Harbor](https://goharbo `externalURL`: The URL exposed to tenants. - ![harbor-config](/images/docs/appstore/built-in-apps/harbor-app/harbor-config.png) - {{< notice note >}} - Don't forget to specify `externalURL`. This field can be very helpful if you have trouble accessing Harbor. @@ -50,12 +40,10 @@ This tutorial walks you through an example of deploying [Harbor](https://goharbo {{}} - When you finish editing the configuration, click **Deploy** to continue. + When you finish editing the configuration, click **Install** to continue. 5. Wait until Harbor is up and running. 
- ![creating-harbor](/images/docs/appstore/built-in-apps/harbor-app/creating-harbor.png) - ### Step 2: Access Harbor 1. Based on the field `expose.type` you set in the configuration file, the access method may be different. As this example uses `nodePort` to access Harbor, visit `http://:30002` as set in the previous step. diff --git a/content/en/docs/application-store/built-in-apps/memcached-app.md b/content/en/docs/application-store/built-in-apps/memcached-app.md index 87bfa206c..ce7699672 100644 --- a/content/en/docs/application-store/built-in-apps/memcached-app.md +++ b/content/en/docs/application-store/built-in-apps/memcached-app.md @@ -12,43 +12,27 @@ This tutorial walks you through an example of deploying Memcached from the App S ## Prerequisites - Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy Memcached from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. 
On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![in-app-store](/images/docs/appstore/built-in-apps/memcached-app/in-app-store.png) - -2. Find Memcached and click **Deploy** on the **App Information** page. - - ![memcached-app-store](/images/docs/appstore/built-in-apps/memcached-app/memcached-app-store.png) - - ![deploying-memcached](/images/docs/appstore/built-in-apps/memcached-app/deploying-memcached.png) +2. Find Memcached and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure Memcached is deployed in `demo-project` and click **Next**. - ![deployment-confirm](/images/docs/appstore/built-in-apps/memcached-app/deployment-confirm.png) - -4. In **App Configurations**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Deploy** to continue. - - ![edit-config](/images/docs/appstore/built-in-apps/memcached-app/edit-config.png) +4. In **App Settings**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Install** to continue. 5. Wait until Memcached is up and running. - ![memcached-running](/images/docs/appstore/built-in-apps/memcached-app/memcached-running.png) - ### Step 2: Access Memcached 1. Navigate to **Services**, and click the service name of Memcached. - ![memcached-service](/images/docs/appstore/built-in-apps/memcached-app/memcached-service.png) - -2. On the detail page, you can find the port number and Pod IP under **Service Ports** and **Pods** respectively. - - ![memcached-port-pod](/images/docs/appstore/built-in-apps/memcached-app/memcached-port-pod.png) +2. On the detail page, you can find the port number and Pod's IP address under **Ports** and **Pods** respectively. 3. As the Memcached service is headless, access it inside the cluster through the Pod IP and port number. 
The basic syntax of the Memcached `telnet` command is `telnet HOST PORT`. For example:
- - ![minio-in-app-store](/images/docs/appstore/built-in-apps/minio-app/minio-in-app-store.png) - - ![deploy-minio](/images/docs/appstore/built-in-apps/minio-app/deploy-minio.png) +2. Find MinIO and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure MinIO is deployed in `demo-project` and click **Next**. - ![minio-deploy](/images/docs/appstore/built-in-apps/minio-app/minio-deploy.png) - -4. In **App Configurations**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Deploy** to continue. - - ![deloy-minio-2](/images/docs/appstore/built-in-apps/minio-app/deloy-minio-2.png) +4. In **App Settings**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Install** to continue. 5. Wait until MinIO is up and running. - ![minio-in-list](/images/docs/appstore/built-in-apps/minio-app/minio-in-list.png) - -### Step 2: Access the MinIO Browser +### Step 2: Access the MinIO browser To access MinIO outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of MinIO. - ![minio-detail](/images/docs/appstore/built-in-apps/minio-app/minio-detail.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-internet-access](/images/docs/appstore/built-in-apps/minio-app/edit-internet-access.png) +2. Click **More** and select **Edit External Access** from the drop-down menu. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/appstore/built-in-apps/minio-app/nodeport.png) +4. On the **Services** page, click **MinIO**. On the page that appears, under **Ports**, you can see the port is exposed. -4. Under **Service Ports**, you can see the port is exposed. 
- - ![port-exposed](/images/docs/appstore/built-in-apps/minio-app/port-exposed.png) - -5. To access the MinIO browser, you need `accessKey` and `secretKey`, which are specified in the configuration file of MinIO. Go to **App Templates** in **Apps**, click MinIO, and you can find the value of these two fields under the tab **Configuration Files**. - - ![template-list](/images/docs/appstore/built-in-apps/minio-app/template-list.png) - - ![config-file](/images/docs/appstore/built-in-apps/minio-app/config-file.png) +5. To access the MinIO browser, you need `accessKey` and `secretKey`, which are specified in the configuration file of MinIO. Go to **Template-Based Apps** in **Apps**, click MinIO, and you can find the value of these two fields under the tab **Chart Files**. 6. Access the MinIO browser through `:` using `accessKey` and `secretKey`. @@ -74,7 +50,7 @@ To access MinIO outside the cluster, you need to expose the app through a NodePo {{< notice note >}} - You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/mongodb-app.md b/content/en/docs/application-store/built-in-apps/mongodb-app.md index 11d7020f5..62dbc023c 100644 --- a/content/en/docs/application-store/built-in-apps/mongodb-app.md +++ b/content/en/docs/application-store/built-in-apps/mongodb-app.md @@ -13,57 +13,41 @@ This tutorial walks you through an example of deploying MongoDB from the App Sto ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. 
The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy MongoDB from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![app-store](/images/docs/appstore/built-in-apps/mongodb-app/app-store.png) - -2. Find MongoDB and click **Deploy** on the **App Information** page. - - ![mongodb-in-app-store](/images/docs/appstore/built-in-apps/mongodb-app/mongodb-in-app-store.png) - - ![deploy-mongodb](/images/docs/appstore/built-in-apps/mongodb-app/deploy-mongodb.png) +2. Find MongoDB and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure MongoDB is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/appstore/built-in-apps/mongodb-app/confirm-deployment.png) - -4. In **App Configurations**, specify persistent volumes for the app and record the username and the password which will be used to access the app. When you finish, click **Deploy**. 
- - ![set-app-configuration](/images/docs/appstore/built-in-apps/mongodb-app/set-app-configuration.png) +4. In **App Settings**, specify persistent volumes for the app and record the username and the password which will be used to access the app. When you finish, click **Install**. {{< notice note >}} - To specify more values for MongoDB, use the toggle switch to see the app’s manifest in YAML format and edit its configurations. + To specify more values for MongoDB, use the toggle switch to see the app's manifest in YAML format and edit its configurations. {{}} 5. Wait until MongoDB is up and running. - ![mongodb-running](/images/docs/appstore/built-in-apps/mongodb-app/mongodb-running.png) - ### Step 2: Access the MongoDB Terminal 1. Go to **Services** and click the service name of MongoDB. - ![mongodb-service](/images/docs/appstore/built-in-apps/mongodb-app/mongodb-service.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![mongodb-terminal](/images/docs/appstore/built-in-apps/mongodb-app/mongodb-terminal.png) - 3. In the pop-up window, enter commands in the terminal directly to use the app. ![mongodb-service-terminal](/images/docs/appstore/built-in-apps/mongodb-app/mongodb-service-terminal.jpg) {{< notice note >}} - If you want to access MongoDB outside the cluster, click **More** and select **Edit Internet Access**. In the dialog that appears, select **NodePort** as the access mode. Use the port number to access MongoDB after it is exposed. You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + If you want to access MongoDB outside the cluster, click **More** and select **Edit External Access**. In the dialog that appears, select **NodePort** as the access mode. Use the port number to access MongoDB after it is exposed. 
You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/mysql-app.md b/content/en/docs/application-store/built-in-apps/mysql-app.md index 6b6b74cd0..2394bc1ba 100644 --- a/content/en/docs/application-store/built-in-apps/mysql-app.md +++ b/content/en/docs/application-store/built-in-apps/mysql-app.md @@ -6,74 +6,50 @@ description: 'Learn how to deploy MySQL from the App Store of KubeSphere and acc link title: "Deploy MySQL" weight: 14260 --- -[MySQL](https://www.mysql.com/) is an open-source relational database management system (RDBMS), which uses the most commonly used database management language - Structured Query Language (SQL) for database management. It provides a fully managed database service to deploy cloud-native applications using the world’s most popular open-source database. +[MySQL](https://www.mysql.com/) is an open-source relational database management system (RDBMS), which uses the most commonly used database management language - Structured Query Language (SQL) for database management. It provides a fully managed database service to deploy cloud-native applications using the world's most popular open-source database. This tutorial walks you through an example of deploying MySQL from the App Store of KubeSphere. ## Prerequisites - Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). 
+- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy MySQL from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![go-to-app-store](/images/docs/appstore/built-in-apps/mysql-app/go-to-app-store.png) - -2. Find MySQL and click **Deploy** on the **App Information** page. - - ![find-mysql](/images/docs/appstore/built-in-apps/mysql-app/find-mysql.png) - - ![click-deploy](/images/docs/appstore/built-in-apps/mysql-app/click-deploy.png) +2. Find MySQL and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure MySQL is deployed in `demo-project` and click **Next**. - ![deploy-mysql](/images/docs/appstore/built-in-apps/mysql-app/deploy-mysql.png) - -4. In **App Configurations**, uncomment the `mysqlRootPassword` field and customize the password. Click **Deploy** to continue. - - ![uncomment-password](/images/docs/appstore/built-in-apps/mysql-app/uncomment-password.png) +4. In **App Settings**, uncomment the `mysqlRootPassword` field and customize the password. Click **Install** to continue. 5. Wait until MySQL is up and running. - ![mysql-running](/images/docs/appstore/built-in-apps/mysql-app/mysql-running.png) - -### Step 2: Access the MySQL Terminal +### Step 2: Access the MySQL terminal 1. Go to **Workloads** and click the workload name of MySQL. 
- ![mysql-workload](/images/docs/appstore/built-in-apps/mysql-app/mysql-workload.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![mysql-teminal](/images/docs/appstore/built-in-apps/mysql-app/mysql-teminal.png) - 3. In the terminal, execute `mysql -uroot -ptesting` to log in to MySQL as the root user. ![log-in-mysql](/images/docs/appstore/built-in-apps/mysql-app/log-in-mysql.png) -### Step 3: Access the MySQL Database outside the Cluster +### Step 3: Access the MySQL database outside the cluster To access MySQL outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of MySQL. - ![mysql-service](/images/docs/appstore/built-in-apps/mysql-app/mysql-service.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-internet-access](/images/docs/appstore/built-in-apps/mysql-app/edit-internet-access.png) +2. Click **More** and select **Edit External Access** from the drop-down list. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport-mysql](/images/docs/appstore/built-in-apps/mysql-app/nodeport-mysql.png) - -4. Under **Service Ports**, you can see the port is exposed. The port and public IP will be used in the next step to access the MySQL database. - - ![mysql-port-number](/images/docs/appstore/built-in-apps/mysql-app/mysql-port-number.png) +4. Under **Ports**, you can see the port is exposed. The port and public IP address will be used in the next step to access the MySQL database. 5. To access your MySQL database, you need to use the MySQL client or install a third-party application such as SQLPro Studio for the connection. The following example demonstrates how to access the MySQL database through SQLPro Studio. 
@@ -83,7 +59,7 @@ To access MySQL outside the cluster, you need to expose the app through a NodePo {{< notice note >}} - You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/nginx-app.md b/content/en/docs/application-store/built-in-apps/nginx-app.md index 916c7aaef..f009e0087 100644 --- a/content/en/docs/application-store/built-in-apps/nginx-app.md +++ b/content/en/docs/application-store/built-in-apps/nginx-app.md @@ -13,31 +13,19 @@ This tutorial walks you through an example of deploying NGINX from the App Store ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy NGINX from the App Store -1. 
On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![app-store](/images/docs/appstore/built-in-apps/nginx-app/app-store.png) - -2. Find NGINX and click **Deploy** on the **App Information** page. - - ![nginx-in-app-store](/images/docs/appstore/built-in-apps/nginx-app/nginx-in-app-store.png) - - ![deploy-nginx](/images/docs/appstore/built-in-apps/nginx-app/deploy-nginx.png) +2. Find NGINX and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure NGINX is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/appstore/built-in-apps/nginx-app/confirm-deployment.png) - -4. In **App Configurations**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Deploy**. - - ![edit-config-nginx](/images/docs/appstore/built-in-apps/nginx-app/edit-config-nginx.png) - - ![manifest-file](/images/docs/appstore/built-in-apps/nginx-app/manifest-file.png) +4. In **App Settings**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Install**. {{< notice note >}} @@ -47,27 +35,17 @@ This tutorial walks you through an example of deploying NGINX from the App Store 5. Wait until NGINX is up and running. - ![nginx-running](/images/docs/appstore/built-in-apps/nginx-app/nginx-running.png) - ### Step 2: Access NGINX To access NGINX outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of NGINX. - ![nginx-service](/images/docs/appstore/built-in-apps/nginx-app/nginx-service.png) - -2. On the service detail page, click **More** and select **Edit Internet Access** from the drop-down menu. 
- - ![edit-internet-access](/images/docs/appstore/built-in-apps/nginx-app/edit-internet-access.png) +2. On the service details page, click **More** and select **Edit External Access** from the drop-down list. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/appstore/built-in-apps/nginx-app/nodeport.png) - -4. Under **Service Ports**, you can see the port is exposed. - - ![exposed-port](/images/docs/appstore/built-in-apps/nginx-app/exposed-port.png) +4. Under **Ports**, you can see the port is exposed. 5. Access NGINX through `:`. @@ -75,7 +53,7 @@ To access NGINX outside the cluster, you need to expose the app through a NodePo {{< notice note >}} - You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/postgresql-app.md b/content/en/docs/application-store/built-in-apps/postgresql-app.md index 0ba6d90ce..8eb6a6b58 100644 --- a/content/en/docs/application-store/built-in-apps/postgresql-app.md +++ b/content/en/docs/application-store/built-in-apps/postgresql-app.md @@ -6,76 +6,54 @@ linkTitle: "Deploy PostgreSQL on KubeSphere" weight: 14280 --- -[PostgreSQL](https://www.postgresql.org/) is a powerful, open-source object-relational database system which is famous for reliability, feature robustness, and performance. +[PostgreSQL](https://www.postgresql.org/) is a powerful, open-source object-relational database system, which is famous for reliability, feature robustness, and performance. This tutorial walks you through an example of how to deploy PostgreSQL from the App Store of KubeSphere. 
## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy PostgreSQL from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![click-app-store](/images/docs/appstore/built-in-apps/postgresql-app/click-app-store.png) - -2. Find PostgreSQL and click **Deploy** on the **App Information** page. - - ![postgresql-in-app-store](/images/docs/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png) - - ![deploy-postgresql](/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql.png) +2. Find PostgreSQL and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure PostgreSQL is deployed in `demo-project` and click **Next**. 
- ![deploy-postgresql-2](/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png) - -4. In **App Configurations**, specify persistent volumes for the app and record the username and the password which will be used later to access the app. When you finish, click **Deploy**. - - ![set-config](/images/docs/appstore/built-in-apps/postgresql-app/set-config.png) +4. In **App Settings**, specify persistent volumes for the app and record the username and the password, which will be used later to access the app. When you finish, click **Install**. {{< notice note >}} - To specify more values for PostgreSQL, use the toggle switch to see the app’s manifest in YAML format and edit its configurations. + To specify more values for PostgreSQL, use the toggle switch to see the app's manifest in YAML format and edit its configurations. {{}} 5. Wait until PostgreSQL is up and running. - ![postgresql-ready](/images/docs/appstore/built-in-apps/postgresql-app/postgresql-ready.png) - -### Step 2: Access the PostgreSQL Database +### Step 2: Access the PostgreSQL database To access PostgreSQL outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of PostgreSQL. - ![access-postgresql](/images/docs/appstore/built-in-apps/postgresql-app/access-postgresql.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-internet-access](/images/docs/appstore/built-in-apps/postgresql-app/edit-internet-access.png) +2. Click **More** and select **Edit External Access** from the drop-down list. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/appstore/built-in-apps/postgresql-app/nodeport.png) +4. Under **Ports**, you can see the port is exposed, which will be used in the next step to access the PostgreSQL database. -4. 
Under **Service Ports**, you can see the port is exposed, which will be used in the next step to access the PostgreSQL database. - - ![port-number](/images/docs/appstore/built-in-apps/postgresql-app/port-number.png) - -5. Expand the Pod menu under **Pods** and click the Terminal icon. In the pop-up window, enter commands directly to access the database. - - ![container-terminal](/images/docs/appstore/built-in-apps/postgresql-app/container-terminal.png) +5. Expand the Pod menu under **Pods** and click the **Terminal** icon. In the pop-up window, enter commands directly to access the database. ![postgresql-output](/images/docs/appstore/built-in-apps/postgresql-app/postgresql-output.png) {{< notice note >}} - You can also use a third-party application such as SQLPro Studio to connect to the database. You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You can also use a third-party application such as SQLPro Studio to connect to the database. You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/rabbitmq-app.md b/content/en/docs/application-store/built-in-apps/rabbitmq-app.md index 30d3c5837..f344bf6df 100644 --- a/content/en/docs/application-store/built-in-apps/rabbitmq-app.md +++ b/content/en/docs/application-store/built-in-apps/rabbitmq-app.md @@ -12,7 +12,7 @@ This tutorial walks you through an example of how to deploy RabbitMQ from the Ap ## Prerequisites - Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. 
In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -20,62 +20,41 @@ This tutorial walks you through an example of how to deploy RabbitMQ from the Ap 1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. - ![rabbitmq01](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png) - -2. Find RabbitMQ and click **Deploy** on the **App Information** page. - - ![find-rabbitmq](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png) - - ![click-deploy](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png) +2. Find RabbitMQ and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure RabbitMQ is deployed in `demo-project` and click **Next**. - ![rabbitmq03](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png) - -4. In **App Configurations**, you can use the default configuration directly or customize the configuration either by specifying fields in a form or editing the YAML file. Record the value of **Root Username** and the value of **Root Password**, which will be used later for login. Click **Deploy** to continue. - - ![rabbitMQ11](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png) - - ![rabbitMQ04](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png) +4. 
In **App Settings**, you can use the default settings directly or customize the settings either by specifying fields in a form or editing the YAML file. Record the value of **Root Username** and the value of **Root Password**, which will be used later for login. Click **Install** to continue. {{< notice tip >}} - To see the manifest file, toggle the **YAML** switch. + To see the manifest file, toggle the **Edit YAML** switch. {{}} 5. Wait until RabbitMQ is up and running. - ![check-if-rabbitmq-is-running](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png) - -### Step 2: Access the RabbitMQ Dashboard +### Step 2: Access the RabbitMQ dashboard To access RabbitMQ outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of RabbitMQ. - ![go-to-services](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![rabbitmq07](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png) +2. Click **More** and select **Edit External Access** from the drop-down list. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![rabbitmq08](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png) - -4. Under **Service Ports**, you can see ports are exposed. - - ![rabbitmq09](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png) +4. Under **Ports**, you can see ports are exposed. 5. Access RabbitMQ **management** through `:`. Note that the username and password are those you set in **Step 1**. 
+ ![rabbitmq-dashboard](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.png) ![rabbitma-dashboard-detail](/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.png) {{< notice note >}} - You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/built-in-apps/radondb-mysql-app.md b/content/en/docs/application-store/built-in-apps/radondb-mysql-app.md index e45705ad2..fa9286dd4 100644 --- a/content/en/docs/application-store/built-in-apps/radondb-mysql-app.md +++ b/content/en/docs/application-store/built-in-apps/radondb-mysql-app.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to deploy RadonDB MySQL from the App Store of Kub ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -21,34 +21,20 @@ This tutorial demonstrates how to deploy RadonDB MySQL from the App Store of Kub 1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. -2. Find RadonDB MySQL and click **Deploy** on the **App Information** page. - - ![RadonDB MySQL-in-app-store](/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png) - - ![deploy-RadonDB MySQL](/images/docs/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png) +2. Find RadonDB MySQL and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure RadonDB MySQL is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png) - -4. In **App Configurations**, you can use the default configuration or customize the configuration by editing the YAML file directly. When you finish, click **Deploy**. - - ![set-app-configuration](/images/docs/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png) +4. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file directly. When you finish, click **Install**. 5. Wait until RadonDB MySQL is up and running. - ![RadonDB MySQL-running](/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png) - ### Step 2: Access RadonDB MySQL 1. In **Services** under **Application Workloads**, click the Service name of RadonDB MySQL. - ![RadonDB MySQL-service](/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![RadonDB MySQL-terminal](/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png) - 3. 
In the pop-up window, enter commands in the terminal directly to use the app. ![Access RadonDB MySQL](/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service-terminal.png) diff --git a/content/en/docs/application-store/built-in-apps/radondb-postgresql-app.md b/content/en/docs/application-store/built-in-apps/radondb-postgresql-app.md index a47590174..0ce592b10 100644 --- a/content/en/docs/application-store/built-in-apps/radondb-postgresql-app.md +++ b/content/en/docs/application-store/built-in-apps/radondb-postgresql-app.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to deploy RadonDB PostgreSQL from the App Store o ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -23,49 +23,27 @@ This tutorial demonstrates how to deploy RadonDB PostgreSQL from the App Store o 2. Click **Database & Cache** under **Categories**. 
- ![RadonDB PostgreSQL-in-app-store](/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png) - -3. Find RadonDB PostgreSQL and click **Deploy** on the **App Information** page. - - ![deploy-RadonDB PostgreSQL](/images/docs/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png) +3. Find RadonDB PostgreSQL and click **Install** on the **App Information** page. 4. Set a name and select an app version. Make sure RadonDB PostgreSQL is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png) - -5. In **App Configurations**, you can use the default configuration or customize the configuration by editing the YAML file. When you finish, click **Deploy**. - - ![set-app-configuration](/images/docs/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png) +5. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file. When you finish, click **Install**. 6. Wait until RadonDB PostgreSQL is up and running. - ![RadonDB PostgreSQL-running](/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png) - -### Step 2: View PostgreSQL Cluster status +### Step 2: View PostgreSQL cluster status 1. On the **Overview** page of the project `demo-project`, you can see a list of resource usage in the current project. - ![project-overview](/images/docs/appstore/built-in-apps/radondb-postgresql-app/project-overview.png) - -2. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab and you can see the StatefulSet is up and running. - - ![statefulsets-running](/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png) +2. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab, and then you can see the StatefulSet is up and running. 
Click the StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. - ![statefulset-monitoring](/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png) - 3. In **Pods** under **Application Workloads**, you can see all the Pods are up and running. - ![pods-running](/images/docs/appstore/built-in-apps/radondb-postgresql-app/pods-running.png) - 4. In **Volumes** under **Storage**, you can see the PostgreSQL Cluster components are using persistent volumes. - ![volumes](/images/docs/appstore/built-in-apps/radondb-postgresql-app/volumes.png) - - Volume usage is also monitored. Click a volume item to go to its detail page. Here is an example of one of the data nodes. - - ![volume-status](/images/docs/appstore/built-in-apps/radondb-postgresql-app/volume-status.png) + Volume usage is also monitored. Click a volume item to go to its detail page. ### Step 3: Access RadonDB PostgreSQL @@ -73,8 +51,6 @@ This tutorial demonstrates how to deploy RadonDB PostgreSQL from the App Store o 2. On the **Resource Status** page, click the **Terminal** icon. - ![RadonDB PostgreSQL-terminal](/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png) - 3. In the displayed dialog box, run the following command and enter the user password in the terminal to use the app. ```bash diff --git a/content/en/docs/application-store/built-in-apps/redis-app.md b/content/en/docs/application-store/built-in-apps/redis-app.md index f3166fded..17229a9da 100644 --- a/content/en/docs/application-store/built-in-apps/redis-app.md +++ b/content/en/docs/application-store/built-in-apps/redis-app.md @@ -13,50 +13,34 @@ This tutorial walks you through an example of deploying Redis from the App Store ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). 
-- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy Redis from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![app-store](/images/docs/appstore/built-in-apps/redis-app/app-store.png) - -2. Find Redis and click **Deploy** on the **App Information** page. - - ![redis-in-app-store](/images/docs/appstore/built-in-apps/redis-app/redis-in-app-store.png) - - ![deploy-redis](/images/docs/appstore/built-in-apps/redis-app/deploy-redis.png) +2. Find Redis and click **Install** on the **App Information** page. 3. Set a name and select an app version. Make sure Redis is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/appstore/built-in-apps/redis-app/confirm-deployment.png) - -4. In **App Configurations**, specify persistent volumes and a password for the app. When you finish, click **Deploy**. 
- - ![configure-redis](/images/docs/appstore/built-in-apps/redis-app/configure-redis.png) +4. In **App Settings**, specify persistent volumes and a password for the app. When you finish, click **Install**. {{< notice note >}} - To specify more values for Redis, use the toggle switch to see the app’s manifest in YAML format and edit its configurations. + To specify more values for Redis, use the toggle switch to see the app's manifest in YAML format and edit its settings. {{}} 5. Wait until Redis is up and running. - ![redis-running](/images/docs/appstore/built-in-apps/redis-app/redis-running.png) - -### Step 2: Access the Redis Terminal +### Step 2: Access the Redis terminal 1. Go to **Services** and click the service name of Redis. - ![access-redis](/images/docs/appstore/built-in-apps/redis-app/access-redis.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![redis-terminal](/images/docs/appstore/built-in-apps/redis-app/redis-terminal.png) - 3. In the pop-up window, use the `redis-cli` command in the terminal to use the app. ![use-redis](/images/docs/appstore/built-in-apps/redis-app/use-redis.png) diff --git a/content/en/docs/application-store/built-in-apps/tomcat-app.md b/content/en/docs/application-store/built-in-apps/tomcat-app.md index bfe2a3ac7..30079334d 100644 --- a/content/en/docs/application-store/built-in-apps/tomcat-app.md +++ b/content/en/docs/application-store/built-in-apps/tomcat-app.md @@ -12,67 +12,43 @@ This tutorial walks you through an example of deploying Tomcat from the App Stor ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. 
In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy Tomcat from the App Store -1. On the **Overview** page of the project `demo-project`, click **App Store** in the top-left corner. +1. On the **Overview** page of the project `demo-project`, click **App Store** in the upper-left corner. - ![go-to-app-store](/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.png) +2. Find Tomcat and click **Install** on the **App Information** page. -2. Find Tomcat and click **Deploy** on the **App Information** page. +3. Set a name and select an app version. Make sure Tomcat is deployed in `demo-project` and click **Next**. - ![find-tomcat](/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.png) +4. In **App Settings**, you can use the default settings or customize the settings by editing the YAML file directly. Click **Install** to continue. - ![click-deploy](/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.png) +5. Wait until Tomcat is up and running. -3. Set a name and select an app version. Make sure Tomcat is deployed in `demo-project` and click **Next**. - - ![click-next](/images/docs/appstore/built-in-apps/tomcat-app/click-next.png) - -4. In **App Configurations**, you can use the default configuration or customize the configuration by editing the YAML file directly. 
Click **Deploy** to continue. - - ![deploy-tomcat](/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.png) - -5. Wait until Tomcat is up and running. - - ![tomcat-running](/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.png) - -### Step 2: Access the Tomcat Terminal +### Step 2: Access the Tomcat terminal 1. Go to **Services** and click the service name of Tomcat. - ![click-tomcat-service](/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.png) - 2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon. - ![tomcat-teminal-icon](/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.png) - 3. You can view deployed projects in `/usr/local/tomcat/webapps`. ![view-project](/images/docs/appstore/built-in-apps/tomcat-app/view-project.png) -### Step 3: Access a Tomcat Project from Your Browser +### Step 3: Access a Tomcat project from your browser To access a Tomcat project outside the cluster, you need to expose the app through a NodePort first. 1. Go to **Services** and click the service name of Tomcat. - ![click-tomcat-service](/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-internet-access](/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.png) +2. Click **More** and select **Edit External Access** from the drop-down list. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/appstore/built-in-apps/tomcat-app/nodeport.png) - -4. Under **Service Ports**, you can see the port is exposed. - - ![exposed-port](/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.png) +4. Under **Ports**, you can see the port is exposed. 5. Access the sample Tomcat project through `:/sample` in your browser. 
@@ -80,7 +56,7 @@ To access a Tomcat project outside the cluster, you need to expose the app throu {{< notice note >}} - You may need to open the port in your security groups and configure related port forwarding rules depending on your where your Kubernetes cluster is deployed. + You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. {{}} diff --git a/content/en/docs/application-store/external-apps/deploy-clickhouse.md b/content/en/docs/application-store/external-apps/deploy-clickhouse.md index ea7258a77..7e9dfb6e5 100644 --- a/content/en/docs/application-store/external-apps/deploy-clickhouse.md +++ b/content/en/docs/application-store/external-apps/deploy-clickhouse.md @@ -13,17 +13,17 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl ## Prerequisites - You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. This tutorial uses `demo-workspace` and `demo-project` for demonstration. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. This tutorial uses `demo-workspace` and `demo-project` for demonstration. 
If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to enable the gateway in your project to provide external access. If they are not ready, refer to [Project Gateway](../../../project-administration/project-gateway/). ## Hands-on Lab ### Step 1: Deploy ClickHouse Operator -1. Log in to the KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the bottom-right corner to run the following command to install ClickHouse Operator. It is recommended that you have at least two worker nodes available in your cluster. +1. Log in to the KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the lower-right corner to run the following command to install ClickHouse Operator. It is recommended that you have at least two worker nodes available in your cluster. ```bash - kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/master/clickhouse-operator-install.yml + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/master/clickhouse-operator-install.yml ``` {{< notice note >}} @@ -34,11 +34,13 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl 2. You can see the expected output as below if the installation is successful. 
- ``` - customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.qingcloud.com created - customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.qingcloud.com created - customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.qingcloud.com created + ```powershell + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.radondb.com created serviceaccount/clickhouse-operator created + clusterrole.rbac.authorization.k8s.io/clickhouse-operator-kube-system created clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator-kube-system created configmap/etc-clickhouse-operator-files created configmap/etc-clickhouse-operator-confd-files created @@ -52,7 +54,7 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl 3. You can run the following command to view the status of ClickHouse Operator resources. ```bash - kubectl get all --selector=app=clickhouse-operator -n kube-system + $ kubectl get all --selector=app=clickhouse-operator -n kube-system ``` Expected output: @@ -75,80 +77,48 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl 1. Log out of KubeSphere and log back in as `ws-admin`. In `demo-workspace`, go to **App Repositories** under **App Management**, and then click **Add**. - ![add-repo](/images/docs/appstore/external-apps/deploy-clickhouse/add-repo.png) +2. In the dialog that appears, enter `clickhouse` for the app repository name and `https://radondb.github.io/radondb-clickhouse-kubernetes/` for the repository URL. 
Click **Validate** to verify the URL, and you will see a green check mark next to the URL if it is available. Click **OK** to continue. -2. In the dialog that appears, enter `clickhouse` for the app repository name and `https://radondb.github.io/radondb-clickhouse-kubernetes/` for the repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. - - ![add-clickhouse](/images/docs/appstore/external-apps/deploy-clickhouse/add-clickhouse.png) - -3. Your repository displays in the list after successfully imported to KubeSphere. - - ![repo-added](/images/docs/appstore/external-apps/deploy-clickhouse/repo-added.png) +3. Your repository will display in the list after it is successfully imported to KubeSphere. ### Step 3: Deploy a ClickHouse Cluster -1. Log out of KubeSphere and log back in as `project-regular`. In `demo-project`, go to **Apps** under **Application Workloads** and click **Deploy New App**. +1. Log out of KubeSphere and log back in as `project-regular`. In `demo-project`, go to **Apps** under **Application Workloads** and click **Create**. - ![click-deploy-new-app](/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png) - -2. In the dialog that appears, select **From App Templates**. - - ![from-app-templates](/images/docs/appstore/external-apps/deploy-clickhouse/from-app-templates.png) +2. In the dialog that appears, select **From App Template**. 3. On the new page that appears, select **clickhouse** from the drop-down list and then click **clickhouse-cluster**. - ![clickhouse-cluster](/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png) - -4. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Deploy** to continue. - - ![chart-tab](/images/docs/appstore/external-apps/deploy-clickhouse/chart-tab.png) +4. 
On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Install** to continue. 5. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. - ![basic-info](/images/docs/appstore/external-apps/deploy-clickhouse/basic-info.png) +6. On the **App Settings** tab, you can change the YAML file to customize settings. In this tutorial, click **Install** to use the default settings. -6. On the **App Configurations** tab, you can change the YAML file to customize configurations. In this tutorial, click **Deploy** to use the default configurations. +7. After a while, you can see the app is in the **Running** status. - ![click-deploy](/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy.png) +### Step 4: View ClickHouse cluster status -7. After a while, you can see the app status shown as **Running**. +1. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab, and you can see the StatefulSets are up and running. - ![app-running](/images/docs/appstore/external-apps/deploy-clickhouse/app-running.png) - -### Step 4: View ClickHouse Cluster status - -1. In **Workloads** under **Application Workloads**, click the **StatefulSets** tab and you can see the StatefulSets are up and running. - - ![statefulsets-running](/images/docs/appstore/external-apps/deploy-clickhouse/statefulsets-running.png) - -3. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. - - ![statefulset-monitoring](/images/docs/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png) +2. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. 3. In **Pods** under **Application Workloads**, you can see all the Pods are up and running. 
- ![pods-running](/images/docs/appstore/external-apps/deploy-clickhouse/pods-running.png) - 4. In **Volumes** under **Storage**, you can see the ClickHouse Cluster components are using persistent volumes. - ![volumes](/images/docs/appstore/external-apps/deploy-clickhouse/volumes.png) - -5. Volume usage is also monitored. Click a volume item to go to its detail page. Here is an example of one of the data nodes. - - ![volume-status](/images/docs/appstore/external-apps/deploy-clickhouse/volume-status.png) +5. Volume usage is also monitored. Click a volume item to go to its detail page. 6. On the **Overview** page of the project, you can see a list of resource usage in the current project. - ![project-overview](/images/docs/appstore/external-apps/deploy-clickhouse/project-overview.png) +### Step 5: Access the ClickHouse cluster -### Step 5: Access the ClickHouse Cluster - -1. Log out of KubeSphere and log back in as `admin`. Hover your cursor over the hammer icon in the bottom-right corner and then select **Kubectl**. +1. Log out of KubeSphere and log back in as `admin`. Hover your cursor over the hammer icon in the lower-right corner, and then select **Kubectl**. 2. In the window that appears, run the following command and then navigate to the username and password of the ClickHouse cluster. ```bash - kubectl edit chi clickho-749j8s -n demo-project + $ kubectl edit chi clickho-749j8s -n demo-project ``` ![get-username-password](/images/docs/appstore/external-apps/deploy-clickhouse/get-username-password.png) @@ -162,14 +132,13 @@ This tutorial demonstrates how to deploy ClickHouse Operator and a ClickHouse Cl 3. Run the following command to access the ClickHouse cluster, and then you can use command like `show databases` to interact with it. 
```bash - kubectl exec -it chi-clickho-749j8s-all-nodes-0-0-0 -n demo-project -- clickhouse-client --user=clickhouse --password=c1ickh0use0perator + $ kubectl exec -it chi-clickho-749j8s-all-nodes-0-0-0 -n demo-project -- clickhouse-client --user=clickhouse --password=c1ickh0use0perator ``` ![use-clickhouse](/images/docs/appstore/external-apps/deploy-clickhouse/use-clickhouse.png) {{< notice note >}} - In the above command, `chi-clickho-749j8s-all-nodes-0-0-0` is the Pod name and you can find it in **Pods** under **Application Workloads**. Make sure you use your own Pod name, project name, username and password. + In the above command, `chi-clickho-749j8s-all-nodes-0-0-0` is the Pod name and you can find it in **Pods** under **Application Workloads**. Make sure you use your own Pod name, project name, username, and password. {{}} - diff --git a/content/en/docs/application-store/external-apps/deploy-gitlab.md b/content/en/docs/application-store/external-apps/deploy-gitlab.md index 84753bc09..2cea93f76 100644 --- a/content/en/docs/application-store/external-apps/deploy-gitlab.md +++ b/content/en/docs/application-store/external-apps/deploy-gitlab.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. ## Prerequisites - You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and two accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and two accounts (`ws-admin` and `project-regular`) for this tutorial. 
The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -21,39 +21,23 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. 1. Log in to KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. - ![add-repo](/images/docs/appstore/external-apps/deploy-gitlab/add_repo.png) - 2. In the displayed dialog box, enter `main` for the app repository name and `https://charts.kubesphere.io/main` for the app repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. - ![add-main-repo](/images/docs/appstore/external-apps/deploy-gitlab/add-main_repo.png) - -3. The repository is displayed in the list after successfully imported to KubeSphere. - - ![added-main-repo](/images/docs/appstore/external-apps/deploy-gitlab/added-main_repo.png) +3. The repository displays in the list after it is successfully imported to KubeSphere. ### Step 2: Deploy GitLab -1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Deploy New App**. +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. - ![deploy-app](/images/docs/appstore/external-apps/deploy-gitlab/deploy_app.png) - -2. In the displayed dialog box, select **From App Templates**. - - ![from-app-templates](/images/docs/appstore/external-apps/deploy-gitlab/from-app_templates.png) +2. In the dialog box that appears, select **From App Template**. 3. Select `main` from the drop-down list, then click **gitlab**. 
- ![click-gitlab](/images/docs/appstore/external-apps/deploy-gitlab/click_gitlab.png) - -4. On the **App Information** tab and the **Chart Files** tab, you can view the default configuration from the console. Click **Deploy** to continue. - - ![view-config](/images/docs/appstore/external-apps/deploy-gitlab/view_config.png) +4. On the **App Information** tab and the **Chart Files** tab, you can view the default settings on the console. Click **Install** to continue. 5. On the **Basic Information** page, you can view the app name, app version, and deployment location. This tutorial uses the version `4.2.3 [13.2.2]`. Click **Next** to continue. - ![basic-info](/images/docs/appstore/external-apps/deploy-gitlab/basic_info.png) - -6. On the **App Configurations** page, use the following configurations to replace the default configurations, and then click **Deploy**. +6. On the **App Settings** page, use the following settings to replace the default ones, and then click **Install**. ```yaml global: @@ -66,8 +50,6 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. helmTests: enabled: false ``` - - ![change-value](/images/docs/appstore/external-apps/deploy-gitlab/change_value.png) {{< notice note >}} @@ -77,14 +59,8 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. 7. Wait for GitLab to be up and running. - ![gitlab-running](/images/docs/appstore/external-apps/deploy-gitlab/gitlab_running.png) - 8. Go to **Workloads**, and you can see all the Deployments and StatefulSets created for GitLab. - ![deployments-running](/images/docs/appstore/external-apps/deploy-gitlab/deployments_running.png) - - ![statefulsets-running](/images/docs/appstore/external-apps/deploy-gitlab/statefulsets_running.png) - {{< notice note >}} It may take a while before all the Deployments and StatefulSets are up and running. @@ -93,25 +69,21 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. ### Step 3: Get the root user's password -1. 
Go to **Secrets** under **Configurations**, enter `gitlab-initial-root-password` in the search box, and then press **Enter** on your keyboard to search the Secret. - - ![search-secret](/images/docs/appstore/external-apps/deploy-gitlab/search_secret.png) +1. Go to **Secrets** under **Configuration**, enter `gitlab-initial-root-password` in the search box, and then press **Enter** on your keyboard to search the Secret. 2. Click the Secret to go to its detail page, and then click in the upper-right corner to view the password. Make sure you copy it. - ![password](/images/docs/appstore/external-apps/deploy-gitlab/initial_password.png) - ### Step 4: Edit the hosts file -1. Find the hosts file on your local machine. +1. Find the `hosts` file on your local machine. {{< notice note >}} - The path of hosts file is `/etc/hosts` for Linux, or `c:\windows\system32\drivers\etc\hosts` for Windows. + The path of the `hosts` file is `/etc/hosts` for Linux, or `c:\windows\system32\drivers\etc\hosts` for Windows. {{}} -2. Add the following item into the hosts file. +2. Add the following item into the `hosts` file. ``` 192.168.4.3 gitlab.demo-project.svc.cluster.local @@ -126,9 +98,7 @@ This tutorial demonstrates how to deploy GitLab on KubeSphere. ### Step 5: Access GitLab -1. Go to **Services** under **Application Workloads**, enter `nginx-ingress-controller` in the search box, and then press **Enter** on your keyboard to search the Service. You can see the Service is being exposed through port `31246`, which you can use to access GitLab. - - ![search-service](/images/docs/appstore/external-apps/deploy-gitlab/search_service.png) +1. Go to **Services** under **Application Workloads**, enter `nginx-ingress-controller` in the search box, and then press **Enter** on your keyboard to search the Service. You can see the Service has been exposed through port `31246`, which you can use to access GitLab. 
{{< notice note >}}

diff --git a/content/en/docs/application-store/external-apps/deploy-litmus.md b/content/en/docs/application-store/external-apps/deploy-litmus.md
index baaf18f35..97304ef23 100644
--- a/content/en/docs/application-store/external-apps/deploy-litmus.md
+++ b/content/en/docs/application-store/external-apps/deploy-litmus.md
@@ -26,41 +26,36 @@ This tutorial demonstrates how to deploy Litmus on KubeSphere and create chaos e

2. In the dialog that appears, set a name for the repository (for example, `litmus`) and enter the URL `https://litmuschaos.github.io/litmus-helm/`. Click **Validate** to verify the URL. You will see icon if the URL is available. Click **OK** to continue.

-3. The app repository will be displayed in the list after it is successfully imported.
+3. The app repository displays in the list after it is successfully imported.

- ![imported-successfully](/images/docs/appstore/external-apps/deploy-litmus/imported-successfully.png)
+### Step 2: Deploy the Litmus portal

+1. Log out of the KubeSphere console and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads**, and then click **Create**.

-### Step 2: Deploy Litmus Portal
-1. Log out of the KubeSphere console and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads**, and then click **Deploy New App**.

+2. In the dialog that appears, choose **From App Template**.

-2. In the dialog that appears, choose **From App Templates**.

+ - **From App Store**: Select apps from the official App Store of KubeSphere.

- - **From App Store**: select apps from the official APP Store of Kubephere.
-
- - **From App Templates**: select apps from workspace app templates and the third-party Helm app templates of App Repository.
+ - **From App Template**: Select apps from workspace app templates and the third-party Helm app templates of App Repository.

3. 
In the drop-down list, choose `litmus`, and then choose `litmus-2-0-0-beta`. -4. You can view the app information and chart files. Under **Versions**, select a specific version and click **Deploy**. +4. You can view the app information and chart files. Under **Versions**, select a specific version and click **Install**. 5. Under **Basic Information**, set a name for the app. Check the app version and the deployment location, and then click **Next**. -6. Under **App Configurations**, you can edit the yaml file or directly click **Deploy**. +6. Under **App Settings**, you can edit the yaml file or directly click **Install**. -7. The app will be displayed in the list after you create it. - - ![litmus-running](/images/docs/appstore/external-apps/deploy-litmus/litmus-running.png) +7. The app displays in the list after you create it successfully. {{< notice note>}} - It make take a while before Litmus is running. Please wait for the deployment to finish. + It may take a while before Litmus is running. Please wait for the deployment to finish. {{}} -### Step 3: Access Litmus Portal +### Step 3: Access Litmus portal 1. Go to **Services** under **Application Workloads**, copy the `NodePort` of `litmusportal-frontend-service`. - ![litmus-nodeport](/images/docs/appstore/external-apps/deploy-litmus/litmus-nodeport.png) 2. You can access Litmus `Portal` through `${NodeIP}:${NODEPORT}` using the default username and password (`admin`/`litmus`). @@ -69,8 +64,8 @@ This tutorial demonstrates how to deploy Litmus on KubeSphere and create chaos e ![litmus-login-1](/images/docs/appstore/external-apps/deploy-litmus/litmus-login-1.png) {{< notice note >}} - You may need to open the port in your security groups and configure port forwarding rules depending on where your Kubernetes cluster is deployed. Make sure you use your own `NodeIP`. 
- {{}} + You may need to open the port in your security groups and configure port forwarding rules depending on where your Kubernetes cluster is deployed. Make sure you use your own `NodeIP`. + {{}} ### Step 4: Deploy Agent (optional) @@ -90,7 +85,7 @@ For details about how to deploy External Agent, see [Litmus Docs](https://litmus $ kubectl create deployment nginx --image=nginx --replicas=2 --namespace=default ``` -2. Log in to Litmus `Portal`, and then click **Schedule a workflow**. +2. Log in to Litmus `Portal`, and then click **Schedule workflow**. 3. Choose an `Agent` (for example, `Self-Agent`), and then click **Next**. @@ -110,8 +105,6 @@ For details about how to deploy External Agent, see [Litmus Docs](https://litmus On the KubeSphere console, you can see that a Pod is being deleted and recreated. - ![terminate-and-recreate](/images/docs/appstore/external-apps/deploy-litmus/terminate-and-recreate.png) - On the Litmus `Portal`, you can see that the experiment is successful. ![litmus-successful](/images/docs/appstore/external-apps/deploy-litmus/litmus-successful.png) @@ -123,22 +116,16 @@ For details about how to deploy External Agent, see [Litmus Docs](https://litmus - **Experiment 2** -1. Perform step 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-cpu-hog`). +1. Perform steps 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-cpu-hog`). ![](https://pek3b.qingstor.com/kubesphere-community/images/20210604171414.png) 2. On the KubeSphere console, you can see that the pod CPU usage is close to 1 core. - ![pod-cpu-hog](/images/docs/appstore/external-apps/deploy-litmus/pod-cpu-hog.png) - - **Experiment 3** 1. Set the `nginx` replica to `1`. You can see there is only one pod left and view the Pod IP address. - ![nginx-replica](/images/docs/appstore/external-apps/deploy-litmus/nginx-replica.png) - - ![one-pod-left](/images/docs/appstore/external-apps/deploy-litmus/one-pod-left.png) - -2. 
Perform step 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-network-loss`). +2. Perform steps 1 to 10 in **Experiment 1** to create a new chaos experiment (`pod-network-loss`). ![](https://pek3b.qingstor.com/kubesphere-community/images/20210604174057.png) diff --git a/content/en/docs/application-store/external-apps/deploy-metersphere.md b/content/en/docs/application-store/external-apps/deploy-metersphere.md index da8fac2f8..756ebd20a 100644 --- a/content/en/docs/application-store/external-apps/deploy-metersphere.md +++ b/content/en/docs/application-store/external-apps/deploy-metersphere.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to deploy MeterSphere on KubeSphere. ## Prerequisites - You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -21,51 +21,27 @@ This tutorial demonstrates how to deploy MeterSphere on KubeSphere. 1. Log in to KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. 
- ![add-repo](/images/docs/appstore/external-apps/deploy-metersphere/add-repo.PNG) - 2. In the dialog that appears, enter `metersphere` for the app repository name and `https://charts.kubesphere.io/test` for the MeterSphere repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. - ![add-metersphere-repo](/images/docs/appstore/external-apps/deploy-metersphere/add-metersphere-repo.PNG) - -3. Your repository displays in the list after successfully imported to KubeSphere. - - ![added-metersphere-repo](/images/docs/appstore/external-apps/deploy-metersphere/added-metersphere-repo.PNG) +3. Your repository displays in the list after it is successfully imported to KubeSphere. ### Step 2: Deploy MeterSphere -1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Deploy New App**. +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. - ![deploy-app](/images/docs/appstore/external-apps/deploy-metersphere/deploy-app.PNG) - -2. In the dialog that appears, select **From App Templates**. - - ![from-app-templates](/images/docs/appstore/external-apps/deploy-metersphere/from-app-templates.PNG) +2. In the dialog that appears, select **From App Template**. 3. Select `metersphere` from the drop-down list, then click **metersphere-chart**. - ![click-metersphere](/images/docs/appstore/external-apps/deploy-metersphere/click-metersphere.PNG) - -4. On the **App Information** tab and the **Chart Files** tab, you can view the default configuration from the console. Click **Deploy** to continue. - - ![view-config](/images/docs/appstore/external-apps/deploy-metersphere/view-config.PNG) +4. On the **App Information** tab and the **Chart Files** tab, you can view the default configuration from the console. 
Click **Install** to continue. 5. On the **Basic Information** page, you can view the app name, app version, and deployment location. Click **Next** to continue. - ![basic-info](/images/docs/appstore/external-apps/deploy-metersphere/basic-info.PNG) - -6. On the **App Configurations** page, change the value of `imageTag` from `master` to `v1.6`, and then click **Deploy**. - - ![change-value](/images/docs/appstore/external-apps/deploy-metersphere/change-value.PNG) +6. On the **App Settings** page, change the value of `imageTag` from `master` to `v1.6`, and then click **Install**. 7. Wait for MeterSphere to be up and running. - ![metersphere-running](/images/docs/appstore/external-apps/deploy-metersphere/metersphere-running.PNG) - 8. Go to **Workloads**, and you can see two Deployments and three StatefulSets created for MeterSphere. - - ![deployments-running](/images/docs/appstore/external-apps/deploy-metersphere/deployments-running.PNG) - - ![statefulsets-running](/images/docs/appstore/external-apps/deploy-metersphere/statefulsets-running.PNG) {{< notice note >}} @@ -77,8 +53,6 @@ This tutorial demonstrates how to deploy MeterSphere on KubeSphere. 1. Go to **Services** under **Application Workloads**, and you can see the MeterSphere Service and its type is set to `NodePort` by default. - ![metersphere-service](/images/docs/appstore/external-apps/deploy-metersphere/metersphere-service.PNG) - 2. You can access MeterSphere through `:` using the default account and password (`admin/metersphere`). 
![login-metersphere](/images/docs/appstore/external-apps/deploy-metersphere/login-metersphere.PNG) diff --git a/content/en/docs/application-store/external-apps/deploy-tidb.md b/content/en/docs/application-store/external-apps/deploy-tidb.md index d8dd1d71a..82743a82a 100644 --- a/content/en/docs/application-store/external-apps/deploy-tidb.md +++ b/content/en/docs/application-store/external-apps/deploy-tidb.md @@ -14,7 +14,7 @@ This tutorial demonstrates how to deploy TiDB Operator and a TiDB Cluster on Kub - You need to have at least 3 schedulable nodes. - You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -42,78 +42,46 @@ This tutorial demonstrates how to deploy TiDB Operator and a TiDB Cluster on Kub 1. Log out of KubeSphere and log back in as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. - ![add-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG) +2. 
In the displayed dialog box, enter `pingcap` for the app repository name and `https://charts.pingcap.org` for the PingCAP Helm repository URL. Click **Validate** to verify the URL, and you will see a green check mark next to the URL if it is available. Click **OK** to continue. -2. In the dialog that appears, enter `pingcap` for the app repository name and `https://charts.pingcap.org` for the PingCAP Helm repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue. - - ![add-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG) - -3. Your repository displays in the list after successfully imported to KubeSphere. - - ![added-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG) +3. Your repository displays in the list after it is successfully imported to KubeSphere. ### Step 3: Deploy TiDB Operator -1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Deploy New App**. +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. - ![deploy-app](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG) - -2. In the dialog that appears, select **From App Templates**. - - ![from-app-templates](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG) +2. In the displayed dialog box, select **From App Template**. 3. Select `pingcap` from the drop-down list, then click **tidb-operator**. - ![click-tidb-operator](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG) - {{< notice note >}} This tutorial only demonstrates how to deploy TiDB Operator and a TiDB cluster. 
You can also deploy other tools based on your needs. {{}} -4. On the **Chart Files** tab, you can view the configuration from the console directly or download the default `values.yaml` file by clicking the icon in the upper-right corner. Under **Versions**, select a version number from the drop-down list and click **Deploy**. - - ![select-version](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG) +4. On the **Chart Files** tab, you can view the configuration on the console directly or download the default `values.yaml` file by clicking the icon in the upper-right corner. Under **Versions**, select a version number from the drop-down list and click **Install**. 5. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. - ![basic-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG) - -6. On the **App Configurations** page, you can either edit the `values.yaml` file, or click **Deploy** directly with the default configurations. - - ![check-config-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG) +6. On the **App Settings** page, you can either edit the `values.yaml` file, or click **Install** directly with the default configurations. 7. Wait for TiDB Operator to be up and running. - ![tidb-operator-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG) - 8. Go to **Workloads**, and you can see two Deployments created for TiDB Operator. - ![tidb-deployment](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG) - ### Step 4: Deploy a TiDB cluster The process of deploying a TiDB cluster is similar to deploying TiDB Operator. -1. Go to **Apps** under **Application Workloads**, click **Deploy New App**, and then select **From App Templates**. 
- - ![deploy-app-again](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG) - - ![from-app-templates-2](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG) +1. Go to **Apps** under **Application Workloads**, click **Create**, and then select **From App Template**. 2. From the PingCAP repository, click **tidb-cluster**. - ![click-tidb-cluster](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG) - -3. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Deploy** to continue. - - ![download-yaml-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG) +3. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Install** to continue. 4. On the **Basic Information** page, confirm the app name, app version, and deployment location. Click **Next** to continue. - ![tidb-cluster-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG) - 5. Some TiDB components require [persistent volumes](../../../cluster-administration/persistent-volume-and-storage-class/). You can run the following command to view your storage classes. ``` @@ -126,9 +94,7 @@ The process of deploying a TiDB cluster is similar to deploying TiDB Operator. csi-super-high-perf csi-qingcloud Delete Immediate true 71m ``` -6. On the **App Configurations** page, change all the default values of the field `storageClassName` from `local-storage` to the name of your storage class. For example, you can change them to `csi-standard` based on the above output. - - ![tidb-cluster-config](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG) +6. 
On the **App Settings** page, change all the default values of the field `storageClassName` from `local-storage` to the name of your storage class. For example, you can change them to `csi-standard` based on the above output. {{< notice note >}} @@ -136,20 +102,14 @@ The process of deploying a TiDB cluster is similar to deploying TiDB Operator. {{}} -7. Click **Deploy** and you can see two apps in the list as shown below: - - ![tidb-cluster-app-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG) +7. Click **Install**, and you can see two apps in the list. ### Step 5: View TiDB cluster status 1. Go to **Workloads** under **Application Workloads**, and verify that all TiDB cluster Deployments are up and running. - ![tidb-cluster-deployments-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG) - 2. Switch to the **StatefulSets** tab, and you can see TiDB, TiKV and PD are up and running. - ![tidb-statefulsets](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG) - {{< notice note >}} TiKV and TiDB will be created automatically and it may take a while before they display in the list. @@ -158,43 +118,19 @@ The process of deploying a TiDB cluster is similar to deploying TiDB Operator. 3. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab. - TiDB metrics: - - ![tidb-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG) - - TiKV metrics: - - ![tikv-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG) - - PD metrics: - - ![pd-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG) - 4. 
In **Pods** under **Application Workloads**, you can see the TiDB cluster contains two TiDB Pods, three TiKV Pods, and three PD Pods. - ![tidb-pod-list](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG) - 5. In **Volumes** under **Storage**, you can see TiKV and PD are using persistent volumes. - ![tidb-storage-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG) - -6. Volume usage is also monitored. Click a volume item to go to its detail page. Here is an example of TiKV: - - ![tikv-volume-status](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG) +6. Volume usage is also monitored. Click a volume item to go to its detail page. 7. On the **Overview** page of the project, you can see a list of resource usage in the current project. - ![tidb-project-resource-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG) - ### Step 6: Access the TiDB cluster 1. Go to **Services** under **Application Workloads**, and you can see detailed information of all Services. As the Service type is set to `NodePort` by default, you can access it through the Node IP address outside the cluster. - ![tidb-service](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG) - -3. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `:` to view metrics. - - ![tidb-service-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG) +2. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `:` to view metrics. 
![tidb-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.PNG) diff --git a/content/en/docs/cluster-administration/application-resources-monitoring.md b/content/en/docs/cluster-administration/application-resources-monitoring.md index 6259c0d51..a267ee57d 100644 --- a/content/en/docs/cluster-administration/application-resources-monitoring.md +++ b/content/en/docs/cluster-administration/application-resources-monitoring.md @@ -11,32 +11,20 @@ In addition to monitoring data at the physical resource level, cluster administr ## Prerequisites -You need an account granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account. +You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. ## Resource Usage -1. Click **Platform** in the top-left corner and select **Cluster Management**. - - ![Platform](/images/docs/cluster-administration/cluster-status-monitoring/platform.png) +1. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly. - ![Cluster Management](/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png) +3. Choose **Application Resources** under **Monitoring & Alerting** to see the overview of application resources, including the summary of the usage of all resources in the cluster. -3. 
Choose **Application Resources** under **Monitoring & Alerting** to see the overview of application resource monitoring, including the summary of the usage of all resources in the cluster, as shown in the following figure. +4. Among them, **Cluster Resource Usage** and **Application Resource Usage** retain the monitoring data of the last 7 days and support custom time range queries. - ![Resource Usage](/images/docs/cluster-administration/application-resources-monitoring/application-resources-monitoring.png) - -4. Among them, **Cluster Resources Usage** and **Application Resources Usage** retain the monitoring data of the last 7 days and support custom time range queries. - - ![Time Range](/images/docs/cluster-administration/application-resources-monitoring/time-range.png) - -5. Click a specific resource to view detailed usage and trends of it during a certain time period, such as **CPU** under **Cluster Resources Usage**. The detail page allows you to view specific monitoring data by project. The highly-interactive dashboard enables users to customize the time range, displaying the exact resource usage at a given time point. - - ![Cluster Resources Usage](/images/docs/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png) +5. Click a specific resource to view detailed usage and trends of it during a certain time period, such as **CPU** under **Cluster Resource Usage**. The detail page allows you to view specific monitoring data by project. The highly-interactive dashboard enables users to customize the time range, displaying the exact resource usage at a given time point. ## Usage Ranking -**Usage Ranking** supports the sorting of project resource usage, so that platform administrators can understand the resource usage of each project in the current cluster, including **CPU Usage**, **Memory Usage**, **Pod Count**, as well as **Outbound Traffic** and **Inbound Traffic**. 
You can sort projects in ascending or descending order by one of the indicators in the drop-down list. This feature is very useful for quickly locating your application (Pod) that is consuming heavy CPU or memory. - -![Usage Ranking](/images/docs/cluster-administration/application-resources-monitoring/usage-ranking.png) +**Usage Ranking** supports the sorting of project resource usage, so that platform administrators can understand the resource usage of each project in the current cluster, including **CPU usage**, **memory usage**, **Pod count**, **inbound traffic** and **outbound traffic**. You can sort projects in ascending or descending order by one of the indicators in the drop-down list. This feature is very useful for quickly locating your application (Pod) that is consuming heavy CPU or memory. diff --git a/content/en/docs/cluster-administration/cluster-settings/cluster-gateway.md b/content/en/docs/cluster-administration/cluster-settings/cluster-gateway.md new file mode 100644 index 000000000..47882dd42 --- /dev/null +++ b/content/en/docs/cluster-administration/cluster-settings/cluster-gateway.md @@ -0,0 +1,82 @@ +--- +title: "Cluster Gateway" +keywords: 'KubeSphere, Kubernetes, Cluster, Gateway, NodePort, LoadBalancer' +description: 'Learn how to create a cluster-scope gateway on KubeSphere.' +linkTitle: "Cluster Gateway" +weight: 8630 +--- + +KubeSphere 3.2.x provides cluster-scope gateways to let all projects share a global gateway. This document describes how to set a cluster gateway on KubeSphere. + +## Prerequisites + +You need to prepare a user with the `platform-admin` role, for example, `admin`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create a Cluster Gateway + +1. Log in to the KubeSphere web console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. + +2. 
Go to **Gateway Settings** under **Cluster Settings** from the navigation pane, select the **Cluster Gateway** tab, and click **Enable Gateway**. + +3. In the displayed dialog box, select an access mode for the gateway from the following two options: + + - **NodePort**: Access Services with corresponding node ports through the gateway. The NodePort access mode provides the following configurations: + - **Tracing**: Turn on the **Tracing** toggle to enable the Tracing feature on KubeSphere. Once it is enabled, check whether an annotation (`nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route when the route is inaccessible. If not, add an annotation to your route. + - **Configuration Options**: Add key-value pairs to the cluster gateway. + - **LoadBalancer**: Access Services with a single IP address through the gateway. The LoadBalancer access mode provides the following configurations: + - **Tracing**: Turn on the **Tracing** toggle to enable the Tracing feature on KubeSphere. Once it is enabled, check whether an annotation (`nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route when the route is inaccessible. If not, add an annotation to your route. + - **Load Balancer Provider**: Select a load balancer provider from the drop-down list. + - **Annotations**: Add annotations to the cluster gateway. + - **Configuration Options**: Add key-value pairs to the cluster gateway. + + {{< notice info >}} + + - To use the Tracing feature, turn on **Application Governance** when you create composed applications. + - For more information about how to use configuration options, see [Configuration options](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options). + + {{}} + +4. Click **OK** to create the cluster gateway. + +5. The cluster gateway created is displayed and the basic information of the gateway is also shown on the page. 
+ + {{< notice note >}} + + A gateway named `kubesphere-router-kubesphere-system` is also created, which serves as a global gateway for all projects in your cluster. + + {{}} + +6. Click **Manage** to select an operation from the drop-down menu: + + - **View Details**: Go to the details page of the cluster gateway. + - **Edit**: Edit configurations of the cluster gateway. + - **Disable**: Disable the cluster gateway. + +7. After a cluster gateway is created, see [Routes](../../../project-user-guide/application-workloads/routes/#create-a-route) for more information about how to create a route. + +## Cluster Gateway Details Page + +1. Under the **Cluster Gateway** tab, click **Manage** on the right of a cluster gateway and select **View Details** to open its details page. +2. On the details page, click **Edit** to edit configurations of the cluster gateway or click **More** to select an operation. +3. Click the **Monitoring** tab to view the monitoring metrics of the cluster gateway. +4. Click the **Configuration Options** tab to view configuration options of the cluster gateway. +5. Click the **Gateway Logs** tab to view logs of the cluster gateway. +6. Click the **Resource Status** tab to view workload status of the cluster gateway. Click the scale-up or scale-down button to adjust the number of replicas. +7. Click the **Metadata** tab to view annotations of the cluster gateway. + +## View Project Gateways + +On the **Gateway Settings** page, click the **Project Gateway** tab to view project gateways. + +Click the icon on the right of a project gateway to select an operation from the drop-down menu: + +- **Edit**: Edit configurations of the project gateway. +- **Disable**: Disable the project gateway. + +{{< notice note >}} + +If a project gateway exists prior to the creation of a cluster gateway, the project gateway address may switch between the address of the cluster gateway and that of the project gateway.
It is recommended that you use either the cluster gateway or the project gateway. + +{{}} + +For more information about how to create project gateways, see [Project Gateway](../../../project-administration/project-gateway/). \ No newline at end of file diff --git a/content/en/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md b/content/en/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md index a92a9a630..e95527e5b 100644 --- a/content/en/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md +++ b/content/en/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md @@ -12,28 +12,22 @@ This guide demonstrates how to set cluster visibility. ## Prerequisites * You need to enable the [multi-cluster feature](../../../multicluster-management/). -* You need to have a workspace and an account that has the permission to create workspaces, such as `ws-manager`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +* You need to have a workspace and a user that has the permission to create workspaces, such as `ws-manager`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Set Cluster Visibility ### Select available clusters when you create a workspace -1. Log in to KubeSphere with an account that has the permission to create a workspace, such as `ws-manager`. +1. Log in to KubeSphere with a user that has the permission to create a workspace, such as `ws-manager`. -2. Click **Platform** in the top-left corner and select **Access Control**. In **Workspaces** from the navigation bar, click **Create**. - - ![create-workspace](/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.jpg) +2.
Click **Platform** in the upper-left corner and select **Access Control**. In **Workspaces** from the navigation bar, click **Create**. 3. Provide the basic information for the workspace and click **Next**. -4. On the **Select Clusters** page, you can see a list of available clusters. Check the cluster that you want to allocate to the workspace and click **Create**. - - ![select-cluster](/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-cluster.jpg) +4. On the **Cluster Settings** page, you can see a list of available clusters. Select the clusters that you want to allocate to the workspace and click **Create**. 5. After the workspace is created, workspace members with necessary permissions can create resources that run on the associated cluster. - ![create-project](/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png) - {{< notice warning >}} Try not to create resources on the host cluster to avoid excessive loads, which can lead to a decrease in the stability across clusters. @@ -44,20 +38,16 @@ Try not to create resources on the host cluster to avoid excessive loads, which After a workspace is created, you can allocate additional clusters to the workspace through authorization or unbind a cluster from the workspace. Follow the steps below to adjust the visibility of a cluster. -1. Log in to KubeSphere with an account that has the permission to manage clusters, such as `admin`. +1. Log in to KubeSphere with a user that has the permission to manage clusters, such as `admin`. -2. Click **Platform** in the top-left corner and select **Cluster Management**. Select a cluster from the list to view cluster information. +2. Click **Platform** in the upper-left corner and select **Cluster Management**. Select a cluster from the list to view cluster information. 3. In **Cluster Settings** from the navigation bar, select **Cluster Visibility**. 4. 
You can see the list of authorized workspaces, which means the current cluster is available to resources in all these workspaces. - ![workspace-list](/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.jpg) - -5. Click **Edit Visibility** to set the cluster authorization. You can select new workspaces that will be able to use the cluster or unbind it from a workspace. - - ![assign-workspace](/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.jpg) +5. Click **Edit Visibility** to set the cluster visibility. You can select new workspaces that will be able to use the cluster or unbind it from a workspace. ### Make a cluster public -You can check **Set as public cluster** so that platform users can access the cluster, in which they are able to create and schedule resources. +You can check **Set as Public Cluster** so that platform users can access the cluster, in which they are able to create and schedule resources. 
diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/_index.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/_index.md index c5a4795b4..275de2bb0 100644 --- a/content/en/docs/cluster-administration/cluster-settings/log-collections/_index.md +++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/_index.md @@ -1,5 +1,5 @@ --- -linkTitle: "Log Collection" +linkTitle: "Log Receivers" weight: 8620 _build: diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md index 6f1286f70..a50244b43 100644 --- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md +++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md @@ -1,21 +1,21 @@ --- title: "Add Elasticsearch as a Receiver" keywords: 'Kubernetes, log, elasticsearch, pod, container, fluentbit, output' -description: 'Learn how to add Elasticsearch to receive logs, events or auditing logs.' +description: 'Learn how to add Elasticsearch to receive container logs, resource events, or audit logs.' linkTitle: "Add Elasticsearch as a Receiver" weight: 8622 --- -You can use Elasticsearch, Kafka and Fluentd as log receivers in KubeSphere. This tutorial demonstrates how to add an Elasticsearch receiver. +You can use Elasticsearch, Kafka, and Fluentd as log receivers in KubeSphere. This tutorial demonstrates how to add an Elasticsearch receiver. ## Prerequisites -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +- You need a user granted a role including the permission of **Cluster Management**. 
For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. - Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial. ## Add Elasticsearch as a Receiver -1. Log in to KubeSphere as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. {{< notice note >}} @@ -23,15 +23,13 @@ If you have enabled the [multi-cluster feature](../../../../multicluster-managem {{}} -2. On the **Cluster Management** page, go to **Log Collection** in **Cluster Settings**. +2. On the **Cluster Management** page, go to **Log Receivers** in **Cluster Settings**. 3. Click **Add Log Receiver** and choose **Elasticsearch**. -4. Provide the Elasticsearch service address and port as below: +4. Provide the Elasticsearch service address and port number. - ![add-es](/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-es.png) +5. Elasticsearch will appear in the receiver list on the **Log Receivers** page, the status of which is **Collecting**. -5. Elasticsearch will appear in the receiver list on the **Log Collection** page, the status of which is **Collecting**. - -6. To verify whether Elasticsearch is receiving logs sent from Fluent Bit, click **Log Search** in the **Toolbox** in the bottom-right corner and search logs on the console. For more information, read [Log Query](../../../../toolbox/log-query/). +6. To verify whether Elasticsearch is receiving logs sent from Fluent Bit, click **Log Search** in the **Toolbox** in the lower-right corner and search logs on the console. For more information, read [Log Query](../../../../toolbox/log-query/). 
diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md index 985d640ab..b674da974 100644 --- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md +++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md @@ -1,7 +1,7 @@ --- title: "Add Fluentd as a Receiver" keywords: 'Kubernetes, log, fluentd, pod, container, fluentbit, output' -description: 'Learn how to add Fluentd to receive logs, events or auditing logs.' +description: 'Learn how to add Fluentd to receive logs, events or audit logs.' linkTitle: "Add Fluentd as a Receiver" weight: 8624 --- @@ -13,9 +13,9 @@ You can use Elasticsearch, Kafka and Fluentd as log receivers in KubeSphere. Thi ## Prerequisites -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. -- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial. +- Before adding a log receiver, you need to enable any of the `logging`, `events`, or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial. 
## Step 1: Deploy Fluentd as a Deployment @@ -25,7 +25,7 @@ Run the following commands: {{< notice note >}} -- The following commands create the Fluentd Deployment, Service and ConfigMap in the `default` namespace and add a filter to the Fluentd ConfigMap to exclude logs from the `default` namespace to avoid Fluent Bit and Fluentd loop log collections. +- The following commands create the Fluentd Deployment, Service, and ConfigMap in the `default` namespace and add a filter to the Fluentd ConfigMap to exclude logs from the `default` namespace to avoid Fluent Bit and Fluentd loop log collections. - Change the namespace if you want to deploy Fluentd into a different namespace. {{}} @@ -122,7 +122,7 @@ EOF ## Step 2: Add Fluentd as a Log Receiver -1. Log in to KubeSphere as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. {{< notice note >}} @@ -130,22 +130,20 @@ EOF {{}} -2. On the **Cluster Management** page, go to **Log Collection** in **Cluster Settings**. +2. On the **Cluster Management** page, go to **Log Receivers** in **Cluster Settings**. 3. Click **Add Log Receiver** and choose **Fluentd**. -4. Provide the Fluentd service address and port as below: +4. Provide the Fluentd service address and port number. - ![add-fluentd](/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-fluentd.png) - -5. Fluentd will appear in the receiver list on the **Log Collection** page, the status of which is **Collecting**. +5. Fluentd will appear in the receiver list on the **Log Receivers** page, the status of which is **Collecting**. ## Step 3: Verify Fluentd is Receiving Logs Sent from Fluent Bit 1. Click **Application Workloads** on the **Cluster Management** page. -2. Select **Workloads** and then select the `default` project from the drop-down list on the **Deployments** tab. +2. 
Select **Workloads** and then select the `default` project on the **Deployments** tab. 3. Click the **fluentd** item and then select the **fluentd-xxxxxxxxx-xxxxx** Pod. @@ -153,6 +151,4 @@ EOF 5. On the **fluentd** container page, select the **Container Logs** tab. -6. You can see logs begin to scroll up continuously. - - ![container-logs](/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/container-logs.png) \ No newline at end of file +6. You can see logs begin to scroll up continuously. \ No newline at end of file diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md index 592f63a01..a4325b5c6 100644 --- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md +++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md @@ -1,7 +1,7 @@ --- title: "Add Kafka as a Receiver" keywords: 'Kubernetes, log, kafka, pod, container, fluentbit, output' -description: 'Learn how to add Kafka to receive logs, events or auditing logs.' +description: 'Learn how to add Kafka to receive container logs, resource events, or audit logs.' linkTitle: "Add Kafka as a Receiver" weight: 8623 --- @@ -13,7 +13,7 @@ You can use Elasticsearch, Kafka and Fluentd as log receivers in KubeSphere. Thi ## Prerequisites -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. 
- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). `logging` is enabled as an example in this tutorial. ## Step 1: Create a Kafka Cluster and a Kafka Topic @@ -101,7 +101,7 @@ You can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-op ## Step 2: Add Kafka as a Log Receiver -1. Log in to KubeSphere as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. {{< notice note >}} @@ -109,18 +109,16 @@ You can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-op {{}} -2. On the **Cluster Management** page, go to **Log Collection** in **Cluster Settings**. +2. On the **Cluster Management** page, go to **Log Receivers** in **Cluster Settings**. -3. Click **Add Log Receiver** and select **Kafka**. Enter the Kafka broker address and port as below, and then click **OK** to continue. +3. Click **Add Log Receiver** and select **Kafka**. Enter the Kafka service address and port number, and then click **OK** to continue. - | Address | Port | + | Service Address | Port Number | | ------------------------------------------------------- | ---- | | my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc | 9092 | | my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc | 9092 | | my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc | 9092 | - ![add-kafka](/images/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/add-kafka.png) - 4. 
Run the following commands to verify whether the Kafka cluster is receiving logs sent from Fluent Bit: ```bash diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/introduction.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/introduction.md index 6a68492d8..81b77f3c0 100644 --- a/content/en/docs/cluster-administration/cluster-settings/log-collections/introduction.md +++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/introduction.md @@ -1,20 +1,20 @@ --- -title: "Introduction to Log Collection" +title: "Introduction to Log Receivers" keywords: 'Kubernetes, log, elasticsearch, kafka, fluentd, pod, container, fluentbit, output' -description: 'Learn the basics of cluster log collection, including tools and general steps.' +description: 'Learn the basics of cluster log receivers, including tools, and general steps.' linkTitle: "Introduction" weight: 8621 --- -KubeSphere provides a flexible log collection configuration method. Powered by [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator/), users can easily add, modify, delete, enable or disable Elasticsearch, Kafka and Fluentd receivers. Once a receiver is added, logs will be sent to this receiver. +KubeSphere provides a flexible log receiver configuration method. Powered by [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator/), users can easily add, modify, delete, enable, or disable Elasticsearch, Kafka and Fluentd receivers. Once a receiver is added, logs will be sent to this receiver. This tutorial gives a brief introduction about the general steps of adding log receivers in KubeSphere. ## Prerequisites -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. 
+- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. -- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). +- Before adding a log receiver, you need to enable any of the `Logging`, `Events` or `Auditing` components. For more information, see [Enable Pluggable Components](../../../../pluggable-components/). ## Add a Log Receiver for Container Logs @@ -22,7 +22,7 @@ To add a log receiver: 1. Log in to the web console of KubeSphere as `admin`. -2. Click **Platform** in the top-left corner and select **Cluster Management**. +2. Click **Platform** in the upper-left corner and select **Cluster Management**. {{< notice note >}} @@ -30,9 +30,9 @@ To add a log receiver: {{}} -3. Go to **Log Collection** under **Cluster Settings** in the sidebar. +3. Go to **Log Receivers** under **Cluster Settings** in the sidebar. -4. Click **Add Log Receiver** on the **Logging** tab. +4. On the log receivers list page, click **Add Log Receiver**. {{< notice note >}} @@ -43,9 +43,9 @@ To add a log receiver: ### Add Elasticsearch as a log receiver -A default Elasticsearch receiver will be added with its service address set to an Elasticsearch cluster if `logging`, `events`, or `auditing` is enabled in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +A default Elasticsearch receiver will be added with its service address set to an Elasticsearch cluster if `logging`, `events`, or `auditing` is enabled in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). 
-An internal Elasticsearch cluster will be deployed to the Kubernetes cluster if neither `externalElasticsearchUrl` nor `externalElasticsearchPort` is specified in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) when `logging`, `events` or `auditing` is enabled. The internal Elasticsearch cluster is for testing and development only. It is recommended that you configure an external Elasticsearch cluster for production. +An internal Elasticsearch cluster will be deployed to the Kubernetes cluster if neither `externalElasticsearchHost` nor `externalElasticsearchPort` is specified in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) when `logging`, `events`, or `auditing` is enabled. The internal Elasticsearch cluster is for testing and development only. It is recommended that you configure an external Elasticsearch cluster for production. Log searching relies on the internal or external Elasticsearch cluster configured. @@ -59,33 +59,29 @@ Kafka is often used to receive logs and serves as a broker to other processing s If you need to output logs to more places other than Elasticsearch or Kafka, you can add Fluentd as a log receiver. Fluentd has numerous output plugins which can forward logs to various destinations such as S3, MongoDB, Cassandra, MySQL, syslog, and Splunk. [Add Fluentd as a Receiver](../add-fluentd-as-receiver/) demonstrates how to add Fluentd to receive Kubernetes logs. -## Add a Log Receiver for Events or Auditing Logs +## Add a Log Receiver for Resource Events or Audit Logs -Starting from KubeSphere v3.0.0, the logs of Kubernetes events and the auditing logs of Kubernetes and KubeSphere can be archived in the same way as container logs. 
The tab **Events** or **Auditing** on the **Log Collection** page will appear if `events` or `auditing` is enabled accordingly in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). You can go to the corresponding tab to configure log receivers for Kubernetes events or Kubernetes and KubeSphere auditing logs. +Starting from KubeSphere v3.0.0, resource events and audit logs can be archived in the same way as container logs. The tab **Resource Events** or **Audit Logs** on the **Log Receivers** page will appear if `events` or `auditing` is enabled accordingly in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). You can go to the corresponding tab to configure log receivers for resource events or audit logs. -Container logs, Kubernetes events and Kubernetes and KubeSphere auditing logs should be stored in different Elasticsearch indices to be searched in KubeSphere. The index prefixes are: - -- `ks-logstash-log` for container logs -- `ks-logstash-events` for Kubernetes events -- `ks-logstash-auditing` for Kubernetes and KubeSphere auditing logs +Container logs, resource events, and audit logs should be stored in different Elasticsearch indices to be searched in KubeSphere. The index is automatically generated in the `<index prefix>-<year.month.day>` format. ## Turn a Log Receiver on or Off You can turn a log receiver on or off without adding or deleting it. To turn a log receiver on or off: -1. On the **Log Collection** page, click a log receiver and go to the receiver's detail page. +1. On the **Log Receivers** page, click a log receiver and go to the receiver's detail page. 2. Click **More** and select **Change Status**. -3. Select **Activate** or **Close** to turn the log receiver on or off. +3. Select **Collecting** or **Disabled** to turn the log receiver on or off. -4.
A log receiver's status will be changed to **Close** if you turn it off, otherwise the status will be **Collecting** on the **Log Collection** page. +4. A log receiver's status will be changed to **Disabled** if you turn it off, otherwise the status will be **Collecting** on the **Log Receivers** page. -## Modify or Delete a Log Receiver +## Edit or Delete a Log Receiver -You can modify a log receiver or delete it: +You can edit a log receiver or delete it: -1. On the **Log Collection** page, click a log receiver and go to the receiver's detail page. +1. On the **Log Receivers** page, click a log receiver and go to the receiver's detail page. 2. Edit a log receiver by clicking **Edit** or **Edit YAML** from the drop-down list. -3. Delete a log receiver by clicking **Delete Log Receiver**. +3. Delete a log receiver by clicking **Delete**. diff --git a/content/en/docs/cluster-administration/cluster-status-monitoring.md b/content/en/docs/cluster-administration/cluster-status-monitoring.md index 1c4033323..b01a541c1 100644 --- a/content/en/docs/cluster-administration/cluster-status-monitoring.md +++ b/content/en/docs/cluster-administration/cluster-status-monitoring.md @@ -1,16 +1,16 @@ --- title: "Cluster Status Monitoring" keywords: "Kubernetes, KubeSphere, status, monitoring" -description: "Monitor how a cluster is functioning based on different metrics, including physical resources, etcd, and APIServer." +description: "Monitor how a cluster is functioning based on different metrics, including physical resources, etcd, and API server." linkTitle: "Cluster Status Monitoring" weight: 8200 --- -KubeSphere provides monitoring of related metrics such as CPU, memory, network, and disk of the cluster. You can also review historical monitoring data and sort nodes by different indicators based on their usage in **Cluster Status Monitoring**. +KubeSphere provides monitoring of related metrics such as CPU, memory, network, and disk of the cluster. 
You can also review historical monitoring data and sort nodes by different indicators based on their usage in **Cluster Status**. ## Prerequisites -You need an account granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account. +You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. ## Cluster Status Monitoring @@ -18,143 +18,106 @@ You need an account granted a role including the authorization of **Cluster Mana 2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly. -3. Choose **Cluster Status** under **Monitoring & Alerting** to see the overview of cluster status monitoring, including **Cluster Node Status**, **Components Status**, **Cluster Resources Usage**, **ETCD Monitoring**, and **Service Component Monitoring**, as shown in the following figure. - - ![Cluster Status Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png) +3. Choose **Cluster Status** under **Monitoring & Alerting** to see the overview of cluster status monitoring, including **Cluster Node Status**, **Component Status**, **Cluster Resource Usage**, **etcd Monitoring**, and **Service Component Monitoring**. ### Cluster node status -1. **Cluster Node Status** displays the status of all nodes, separately marking the active ones. You can go to the **Cluster Nodes** page shown below to view the real-time resource usage of all nodes by clicking **Node Online Status**. +1. 
**Cluster Node Status** displays the status of all nodes, separately marking the active ones. You can go to the **Cluster Nodes** page to view the real-time resource usage of all nodes by clicking **Node Online Status**. - ![Cluster Nodes](/images/docs/cluster-administration/cluster-status-monitoring/cluster-nodes.png) +2. In **Cluster Nodes**, click the node name to view usage details in **Running Status**, including **Resource Usage**, **Allocated Resources**, and **Health Status**. -2. In **Cluster Nodes**, click the node name to view usage details in **Status**, including the information of CPU, Memory, Pod, Local Storage in the current node, and its health status. +3. Click the **Monitoring** tab to view how the node is functioning during a certain period based on different metrics, including **CPU Usage**, **Average CPU Load**, **Memory Usage**, **Disk Usage**, **Inode Usage**, **IOPS**, **Disk Throughput**, and **Network Bandwidth**. - ![status](/images/docs/cluster-administration/cluster-status-monitoring/status.png) - -3. Click the tab **Monitoring** to view how the node is functioning during a certain period based on different metrics, including **CPU Utilization, CPU Load Average, Memory Utilization, Disk Utilization, inode Utilization, IOPS, Disk Throughput, and Network Bandwidth**, as shown in the following figure. - - ![Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/monitoring.png) - - {{< notice tip >}}You can customize the time range from the drop-down list in the top-right corner to view historical data. + {{< notice tip >}}You can customize the time range from the drop-down list in the upper-right corner to view historical data. {{</ notice >}} ### Component status KubeSphere monitors the health status of various service components in the cluster. When a key component malfunctions, the system may become unavailable.
The monitoring mechanism of KubeSphere ensures the platform can notify tenants of any occurring issues in case of a component failure, so that they can quickly locate the problem and take corresponding action. -1. On the **Cluster Status Monitoring** page, click components (the part in the green box below) under **Components Status** to view the status of service components. - - ![component-monitoring](/images/docs/cluster-administration/cluster-status-monitoring/component-monitoring.jpg) +1. On the **Cluster Status** page, click a component under **Component Status** to view its status. 2. You can see all the components are listed in this part. Components marked in green are those functioning normally while those marked in orange require special attention as it signals potential issues. - ![Service Components Status](/images/docs/cluster-administration/cluster-status-monitoring/service-components-status.png) - {{< notice tip >}}Components marked in orange may turn to green after a period of time, the reasons of which may be different, such as image pulling retries or pod recreations. You can click the component to see its service details. {{}} -### Cluster resources usage +### Cluster resource usage -**Cluster Resources Usage** displays the information including **CPU Utilization, Memory Utilization, Disk Utilization, and Pod Quantity Trend** of all nodes in the cluster. Click the pie chart on the left to switch indicators, which shows the trend during a period in a line chart on the right. +**Cluster Resource Usage** displays the information including **CPU Usage**, **Memory Usage**, **Disk Usage**, and **Pods** of all nodes in the cluster. Click the pie chart on the left to switch indicators, which shows the trend during a period in a line chart on the right. 
-![Cluster Resources Usage](/images/docs/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png) +## Physical Resource Monitoring -## Physical Resources Monitoring +Monitoring data in **Physical Resource Monitoring** help users better observe their physical resources and establish normal standards for resource and cluster performance. KubeSphere allows users to view cluster monitoring data within the last 7 days, including **CPU Usage**, **Memory Usage**, **Average CPU Load (1 minute/5 minutes/15 minutes)**, **Disk Usage**, **Inode Usage**, **Disk Throughput (read/write)**, **IOPS (read/write)**, **Network Bandwidth**, and **Pod Status**. You can customize the time range and time interval to view historical monitoring data of physical resources in KubeSphere. The following sections briefly introduce each monitoring indicator. -Monitoring data in **Physical Resources Monitoring** help users better observe their physical resources and establish normal standards for resource and cluster performance. KubeSphere allows users to view cluster monitoring data within the last 7 days, including **CPU Utilization**, **Memory Utilization**, **CPU Load Average** **(1 minute/5 minutes/15 minutes)**, **inode Utilization**, **Disk Throughput (read/write)**, **IOPS (read/write)**, **Network Bandwidth**, and **Pod Status**. You can customize the time range and time interval to view historical monitoring data of physical resources in KubeSphere. The following sections briefly introduce each monitoring indicator. +### CPU usage -![Physical Resources Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png) +CPU usage shows how CPU resources are used in a period. If you notice that the CPU usage of the platform during a certain period soars, you must first locate the process that is occupying CPU resources the most. 
For example, for Java applications, you may expect a CPU usage spike in the case of memory leaks or infinite loops in the code. -### CPU utilization +### Memory usage -CPU utilization shows how CPU resources are used in a period. If you notice that the CPU usage of the platform during a certain period soars, you must first locate the process that is occupying CPU resources the most. For example, for Java applications, you may expect a CPU usage spike in the case of memory leaks or infinite loops in the code. +Memory is one of the important components on a machine, serving as a bridge for communications with the CPU. Therefore, the performance of memory has a great impact on the machine. Data loading, thread concurrency and I/O buffering are all dependent on memory when a program is running. The size of available memory determines whether the program can run normally and how it is functioning. Memory usage reflects how memory resources are used within a cluster as a whole, displayed as a percentage of available memory in use at a given moment. -![CPU Utilization](/images/docs/cluster-administration/cluster-status-monitoring/cpu-utilization.png) +### Average CPU load -### Memory utilization +Average CPU load is the average number of processes in the system in a runnable state and an uninterruptible state per unit time. Namely, it is the average number of active processes. Note that there is no direct relation between the average CPU load and the CPU usage. Ideally, the average load should be equal to the number of CPUs. Therefore, you need to consider the number of CPUs when you look into the average load. A system is overloaded only when the average load is greater than the number of CPUs. -Memory is one of the important components on a machine, serving as a bridge for communications with the CPU. Therefore, the performance of memory has a great impact on the machine. 
Data loading, thread concurrency and I/O buffering are all dependent on memory when a program is running. The size of available memory determines whether the program can run normally and how it is functioning. Memory utilization reflects how memory resources are used within a cluster as a whole, displayed as a percentage of available memory in use at a given moment. - -![Memory Utilization](/images/docs/cluster-administration/cluster-status-monitoring/memory-utilization.png) - -### CPU load average - -CPU load average is the average number of processes in the system in a runnable state and an uninterruptible state per unit time. Namely, it is the average number of active processes. Note that there is no direct relation between the CPU load average and the CPU utilization. Ideally, the load average should be equal to the number of CPUs. Therefore, you need to consider the number of CPUs when you look into the load average. A system is overloaded only when the load average is greater than the number of CPUs. - -KubeSphere provides users with three different time periods to view the load average: 1 minute, 5 minutes and 15 minutes. Normally, it is recommended that you review all of them to gain a comprehensive understanding of load averages: +KubeSphere provides users with three different time periods to view the average load: 1 minute, 5 minutes, and 15 minutes. Normally, it is recommended that you review all of them to gain a comprehensive understanding of average CPU load: - If the curves of 1 minute / 5 minutes / 15 minutes are similar within a certain period, it indicates that the CPU load of the cluster is relatively stable. - If the value of 1 minute in a certain period, or at a specific time point is much greater than that of 15 minutes, it means that the load in the last 1 minute is increasing, and you need to keep observing. Once the value of 1 minute exceeds the number of CPUs, it may mean that the system is overloaded. 
You need to further analyze the source of the problem. - Conversely, if the value of 1 minute in a certain period, or at a specific time point is much less than that of 15 minutes, it means that the load of the system is decreasing in the last 1 minute, and a high load has been generated in the previous 15 minutes. -![CPU Load Average](/images/docs/cluster-administration/cluster-status-monitoring/cpu-load-average.png) - ### Disk usage KubeSphere workloads such as `StatefulSets` and `DaemonSets` all rely on persistent volumes. Some components and services also require a persistent volume. Such backend storage relies on disks, such as block storage or network shared storage. In this connection, providing a real-time monitoring environment for disk usage is an important part of maintaining the high reliability of data. In the daily management of the Linux system, platform administrators may encounter data loss or even system crashes due to insufficient disk space. As an essential part of cluster management, they need to pay close attention to the disk usage of the system and ensure that the file system is not filling up or abused. By monitoring the historical data of disk usage, you can evaluate how disks are used during a given period of time. In the case of high disk usage, you can free up disk space by cleaning up unnecessary images or containers. -![Disk Usage](/images/docs/cluster-administration/cluster-status-monitoring/disk-usage.png) - -### inode utilization +### Inode usage Each file must have an inode, which is used to store the file's meta-information, such as the file's creator and creation date. The inode will also consume hard disk space, and many small cache files can easily lead to the exhaustion of inode resources. Also, the inode may be used up, but the hard disk is not full. In this case, new files cannot be created on the hard disk. 
-In KubeSphere, the monitoring of inode utilization can help you detect such situations in advance, as you can have a clear view of cluster inode usage. The mechanism prompts users to clean up temporary files in time, preventing the cluster from being unable to work due to inode exhaustion. - -![inode Utilization](/images/docs/cluster-administration/cluster-status-monitoring/inode-utilization.png) +In KubeSphere, the monitoring of inode usage can help you detect such situations in advance, as you can have a clear view of cluster inode usage. The mechanism prompts users to clean up temporary files in time, preventing the cluster from being unable to work due to inode exhaustion. ### Disk throughput The monitoring of disk throughput and IOPS is an indispensable part of disk monitoring, which is convenient for cluster administrators to adjust data layout and other management activities to optimize the overall performance of the cluster. Disk throughput refers to the speed of the disk transmission data stream (shown in MB/s), and the transmission data are the sum of data reading and writing. When large blocks of discontinuous data are being transmitted, this indicator is of great importance for reference. -![Disk Throughput](/images/docs/cluster-administration/cluster-status-monitoring/disk-throughput.png) ### IOPS **IOPS (Input/Output Operations Per Second)** represents a performance measurement of the number of read and write operations per second. Specifically, the IOPS of a disk is the sum of the number of continuous reads and writes per second. This indicator is of great significance for reference when small blocks of discontinuous data are being transmitted. -![IOPS](/images/docs/cluster-administration/cluster-status-monitoring/iops.png) - ### Network bandwidth The network bandwidth is the ability of the network card to receive or send data per second, shown in Mbps (megabits per second). 
-![Network Bandwidth](/images/docs/cluster-administration/cluster-status-monitoring/netework-bandwidth.png) - ### Pod status Pod status displays the total number of pods in different states, including **Running**, **Completed** and **Warning**. The pod tagged **Completed** usually refers to a Job or a CronJob. The number of pods marked **Warning**, which means an abnormal state, requires special attention. -![Pod Status](/images/docs/cluster-administration/cluster-status-monitoring/pod-status.png) +## etcd Monitoring -## ETCD Monitoring - -ETCD monitoring helps you to make better use of ETCD, especially to locate performance problems. The ETCD service provides metrics interfaces natively, and the KubeSphere monitoring system features a highly graphic and responsive dashboard to display its native data. +etcd monitoring helps you to make better use of etcd, especially to locate performance problems. The etcd service provides metrics interfaces natively, and the KubeSphere monitoring system features a highly graphic and responsive dashboard to display its native data. |Indicators|Description| |---|---| -|ETCD Nodes | - **Is there a Leader** indicates whether the member has a Leader. If a member does not have a Leader, it is completely unavailable. If all members in the cluster do not have any Leader, the entire cluster is completely unavailable.
- **Leader change times** refers to the number of Leader changes seen by members of the cluster since the beginning. Frequent Leader changes will significantly affect the performance of ETCD. It also shows that the Leader is unstable, possibly due to network connection issues or excessive loads hitting the ETCD cluster. | -|DB Size | The size of the underlying database (in MiB) of ETCD. The current graph shows the average size of each member database of ETCD. | -|Client Traffic|It includes the total traffic sent to the grpc client and the total traffic received from the grpc client. For more information about the indicator, see [etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network). | -|gRPC Stream Messages|The gRPC streaming message receiving rate and sending rate on the server side, which reflects whether large-scale data read and write operations are happening in the cluster. For more information about the indicator, see [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters).| +|Service Status | - **Leader exists** indicates whether the member has a Leader. If a member does not have a Leader, it is completely unavailable. If all members in the cluster do not have any Leader, the entire cluster is completely unavailable.
- **Leader changes in 1 h** refers to the number of Leader changes seen by members of the cluster in 1 hour. Frequent Leader changes will significantly affect the performance of etcd. It also shows that the Leader is unstable, possibly due to network connection issues or excessive loads hitting the etcd cluster. | +|DB Size | The size of the underlying database (in MiB) of etcd. The current graph shows the average size of each member database of etcd. | +|Client Traffic|It includes the total traffic sent to the gRPC client and the total traffic received from the gRPC client. For more information about the indicator, see [etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network). | +|gRPC Stream Message|The gRPC streaming message receiving rate and sending rate on the server side, which reflects whether large-scale data read and write operations are happening in the cluster. For more information about the indicator, see [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters).| |WAL Fsync|The latency of WAL calling fsync. A `wal_fsync` is called when etcd persists its log entries to disk before applying them. For more information about the indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests). | -|DB Fsync|The submission delay distribution of the backend calls. When ETCD submits its most recent incremental snapshot to disk, a `backend_commit` will be called. Note that high latency of disk operations (long WAL log synchronization time or library synchronization time) usually indicates disk problems, which may cause high request latency or make the cluster unstable. For more information about the indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests). | -|Raft Proposals|- **Proposal Commit Rate** records the rate of consensus proposals committed. If the cluster is healthy, this indicator should increase over time. 
Several healthy members of an ETCD cluster may have different general proposals at the same time. A continuous large lag between a single member and its leader indicates that the member is slow or unhealthy.
- **Proposal Apply Rate** records the total rate of consensus proposals applied. The ETCD server applies each committed proposal asynchronously. The difference between the **Proposal Commit Rate** and the **Proposal Apply Rate** should usually be small (only a few thousands even under high loads). If the difference between them continues to rise, it indicates that the ETCD server is overloaded. This can happen when using large-scale queries such as heavy range queries or large txn operations.
- **Proposal Failure Rate** records the total rate of failed proposals, usually related to two issues: temporary failures related to leader election or longer downtime due to a loss of quorum in the cluster.
- **Proposal Pending Total** records the current number of pending proposals. An increase in pending proposals indicates high client loads or members unable to submit proposals.
Currently, the data displayed on the dashboard is the average size of ETCD members. For more information about these indicators, see [etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server). | +|DB Fsync|The submission delay distribution of the backend calls. When etcd submits its most recent incremental snapshot to disk, a `backend_commit` will be called. Note that high latency of disk operations (long WAL log synchronization time or library synchronization time) usually indicates disk problems, which may cause high request latency or make the cluster unstable. For more information about the indicator, see [etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#grpc-requests). | +|Raft Proposal|- **Proposal Commit Rate** records the rate of consensus proposals committed. If the cluster is healthy, this indicator should increase over time. Several healthy members of an etcd cluster may have different general proposals at the same time. A continuous large lag between a single member and its leader indicates that the member is slow or unhealthy.
- **Proposal Apply Rate** records the total rate of consensus proposals applied. The etcd server applies each committed proposal asynchronously. The difference between the **Proposal Commit Rate** and the **Proposal Apply Rate** should usually be small (only a few thousands even under high loads). If the difference between them continues to rise, it indicates that the etcd server is overloaded. This can happen when using large-scale queries such as heavy range queries or large txn operations.
- **Proposal Failure Rate** records the total rate of failed proposals, usually related to two issues: temporary failures related to leader election or longer downtime due to a loss of quorum in the cluster.
- **Proposal Pending Total** records the current number of pending proposals. An increase in pending proposals indicates high client loads or members unable to submit proposals.
Currently, the data displayed on the dashboard is the average size of etcd members. For more information about these indicators, see [etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server). | -![ETCD Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/etcd-monitoring.png) +## API Server Monitoring -## APIServer Monitoring - -[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) is the hub for the interaction of all components in a Kubernetes cluster. The following table lists the main indicators monitored for the APIServer. +[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) is the hub for the interaction of all components in a Kubernetes cluster. The following table lists the main indicators monitored for the API Server. |Indicators|Description| |---|---| |Request Latency|Classified by HTTP request methods, the latency of resource request response in milliseconds.| -|Request Per Second|The number of requests accepted by kube-apiserver per second.| - -![APIServer Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png) +|Request per Second|The number of requests accepted by kube-apiserver per second.| ## Scheduler Monitoring @@ -166,10 +129,6 @@ ETCD monitoring helps you to make better use of ETCD, especially to locate perfo |Attempt Rate|Include the scheduling rate of successes, errors, and failures.| |Scheduling latency|End-to-end scheduling delay, which is the sum of scheduling algorithm delay and binding delay| -![Scheduler Monitoring](/images/docs/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png) +## Resource Usage Ranking -## Node Usage Ranking - -You can sort nodes in ascending and descending order by indicators such as CPU, Load Average, Memory, Local Storage, inode Utilization, and Pod Utilization. This enables administrators to quickly find potential problems or identify a node's insufficient resources. 
- -![Node Usage Ranking](/images/docs/cluster-administration/cluster-status-monitoring/node-usage-ranking.png) +You can sort nodes in ascending and descending order by indicators such as CPU usage, average CPU load, memory usage, disk usage, inode usage, and Pod usage. This enables administrators to quickly find potential problems or identify a node's insufficient resources. diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md index b8b399dd7..fe59103b8 100644 --- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md +++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md @@ -11,18 +11,16 @@ Alerting messages record detailed information of alerts triggered based on the a ## Prerequisites - You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). -- You need to create an account (`cluster-admin`) and grant it the `clusters-admin` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role). +- You need to create a user (`cluster-admin`) and grant it the `clusters-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role). - You have created a node-level alerting policy and an alert has been triggered. For more information, refer to [Alerting Policies (Node Level)](../alerting-policy/). ## View Alerting Messages -1. Log in to the KubeSphere console as `cluster-admin` and navigate to **Alerting Messages** under **Monitoring & Alerting**. +1. Log in to the KubeSphere console as `cluster-admin` and go to **Alerting Messages** under **Monitoring & Alerting**. -2. 
On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and message you have defined in the notification of the alert. To view details of an alerting message, click the name of the alerting policy and then click the **Alerting Messages** tab on the page that appears. +2. On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and details you have defined for the alert. To view details of an alerting message, click the name of the alerting policy and then click the **Alerting History** tab on the alerting policy details page. - ![alert-message-page](/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-messages-node-level/alert-message-page.png) - -3. On the **Alerting Messages** tab, you can see alert severity, target resources, and alert time. +3. On the **Alerting History** tab, you can see alert severity, monitoring target, and activation time. ## View Notifications diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md index 6f299221a..097a2c16c 100644 --- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md +++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md @@ -14,7 +14,7 @@ KubeSphere also has built-in policies which will trigger alerts if conditions de - You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). - To receive alert notifications, you must configure a [notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) beforehand. -- You need to create an account (`cluster-admin`) and grant it the `clusters-admin` role. 
For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role). +- You need to create a user (`cluster-admin`) and grant it the `clusters-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-4-create-a-role). - You have workloads in your cluster. If they are not ready, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) to create a sample app. ## Create an Alerting Policy @@ -27,14 +27,14 @@ KubeSphere also has built-in policies which will trigger alerts if conditions de - **Name**. A concise and clear name as its unique identifier, such as `node-alert`. - **Alias**. Help you distinguish alerting policies better. - - **Duration (Minutes)**. An alert will be firing when the conditions defined for an alerting policy are met at any given point in the time range. + - **Threshold Duration (min)**. The status of the alerting policy becomes Firing when the duration of the condition configured in the alerting rule reaches the threshold. - **Severity**. Allowed values include **Warning**, **Error** and **Critical**, providing an indication of how serious an alert is. - **Description**. A brief introduction to the alerting policy. -4. On the **Alerting Rule** tab, you can use the rule template or create a custom rule. To use the template, fill in the following fields and click **Next** to continue. +4. On the **Rule Settings** tab, you can use the rule template or create a custom rule. To use the template, set the following parameters and click **Next** to continue. - - **Monitoring Target**. Select a node in your cluster for monitoring. - - **Alerting Rules**. Define a rule for the alerting policy. The rules provided in the drop-down list are based on Prometheus expressions and an alert will be triggered when conditions are met. You can monitor objects such as CPU and memory. 
+ - **Monitoring Targets**. Select at least one node in your cluster for monitoring. + - **Alerting Rule**. Define a rule for the alerting policy. The rules provided in the drop-down list are based on Prometheus expressions and an alert will be triggered when conditions are met. You can monitor objects such as CPU and memory. {{< notice note >}} @@ -42,7 +42,7 @@ KubeSphere also has built-in policies which will trigger alerts if conditions de {{}} -5. On the **Notification Settings** tab, enter the alert summary and message to be included in your notification, then click **Create**. +5. On the **Message Settings** tab, enter the summary and details of the alerting message, then click **Create**. 6. An alerting policy will be **Inactive** when just created. If conditions in the rule expression are met, it will reach **Pending** first, and then turn to **Firing** if conditions keep to be met in the given time range. @@ -50,17 +50,15 @@ KubeSphere also has built-in policies which will trigger alerts if conditions de To edit an alerting policy after it is created, on the **Alerting Policies** page, click on the right of the alerting policy. -1. Click **Edit** from the drop-down list and edit the alerting policy following the same steps as you create it. Click **Update** on the **Notification Settings** page to save it. +1. Click **Edit** from the drop-down list and edit the alerting policy following the same steps as you create it. Click **OK** on the **Message Settings** page to save it. 2. Click **Delete** from the drop-down list to delete an alerting policy. ## View an Alerting Policy -Click the name of an alerting policy on the **Alerting Policies** page to see its detail information, including alerting rules and alerting messages. You can also see the rule expression which is based on the template you use when creating the alerting policy.
+Click the name of an alerting policy on the **Alerting Policies** page to see its detailed information, including the alerting rule and alerting history. You can also see the rule expression which is based on the template you use when creating the alerting policy. -Under **Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Notification Settings** displays the customized message you set in notifications. -![alerting-policy-details-page](/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/alerting-policy-details-page.png) +Under **Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Alerting Message** displays the customized message you set in notifications. {{< notice note >}} diff --git a/content/en/docs/cluster-administration/nodes.md b/content/en/docs/cluster-administration/nodes.md index 039455b6a..565464357 100644 --- a/content/en/docs/cluster-administration/nodes.md +++ b/content/en/docs/cluster-administration/nodes.md @@ -13,29 +13,23 @@ This tutorial demonstrates what a cluster administrator can view and do for node ## Prerequisites -You need an account granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account. +You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. ## Node Status Cluster nodes are only accessible to cluster administrators. Some node metrics are very important to clusters. Therefore, it is the administrator's responsibility to watch over these numbers and make sure nodes are available. Follow the steps below to view node status. -1.
Click **Platform** in the top-left corner and select **Cluster Management**. - - ![clusters-management-select](/images/docs/cluster-administration/node-management/clusters-management-select.jpg) +1. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. - ![select-a-cluster](/images/docs/cluster-administration/node-management/select-a-cluster.jpg) - 3. Choose **Cluster Nodes** under **Nodes**, where you can see detailed information of node status. - ![Node Status](/images/docs/cluster-administration/node-management/node_status.png) - - **Name**: The node name and subnet IP address. - **Status**: The current status of a node, indicating whether a node is available or not. - **Role**: The role of a node, indicating whether a node is a worker or master. - - **CPU**: The real-time CPU usage of a node. - - **Memory**: The real-time memory usage of a node. + - **CPU Usage**: The real-time CPU usage of a node. + - **Memory Usage**: The real-time memory usage of a node. - **Pods**: The real-time usage of Pods on a node. - **Allocated CPU**: This metric is calculated based on the total CPU requests of Pods on a node. It represents the amount of CPU reserved for workloads on this node, even if workloads are using fewer CPU resources. This figure is vital to the Kubernetes scheduler (kube-scheduler), which favors nodes with lower allocated CPU resources when scheduling a Pod in most cases. For more details, refer to [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - **Allocated Memory**: This metric is calculated based on the total memory requests of Pods on a node. 
It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources. @@ -48,20 +42,10 @@ Cluster nodes are only accessible to cluster administrators. Some node metrics a Click a node from the list and you can go to its detail page. -![Node Detail](/images/docs/cluster-administration/node-management/node_detail.png) - - **Cordon/Uncordon**: Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new Pods to this node if it's been marked unschedulable. Besides, this does not affect existing workloads already on the node. In KubeSphere, you mark a node as unschedulable by clicking **Cordon** on the node detail page. The node will be schedulable if you click the button (**Uncordon**) again. - **Labels**: Node labels can be very useful when you want to assign Pods to specific nodes. Label a node first (for example, label GPU nodes with `node-role.kubernetes.io/gpu-node`), and then add the label in **Advanced Settings** [when you create a workload](../../project-user-guide/application-workloads/deployments/#step-5-configure-advanced-settings) so that you can allow Pods to run on GPU nodes explicitly. To add node labels, click **More** and select **Edit Labels**. - ![drop-down-list-node](/images/docs/cluster-administration/node-management/drop-down-list-node.jpg) - - ![Label Node](/images/docs/cluster-administration/node-management/label_node.jpg) - - ![Assign pods to nodes](/images/docs/cluster-administration/node-management/assign_pods_to_node.jpg) - -- **Taints**: Taints allow a node to repel a set of pods. You add or remove node taints on the node detail page. To add or delete taints, click **More** and select **Taint Management** from the drop-down menu. - - ![add-taints](/images/docs/cluster-administration/node-management/add-taints.jpg) +- **Taints**: Taints allow a node to repel a set of pods. 
You add or remove node taints on the node detail page. To add or delete taints, click **More** and select **Edit Taints** from the drop-down menu. {{< notice note >}} Be careful when you add taints as they may cause unexpected behavior, leading to services unavailable. For more information, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). diff --git a/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md b/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md index 53ff3b973..a8688bae8 100644 --- a/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md +++ b/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md @@ -1,34 +1,34 @@ --- title: "Persistent Volumes and Storage Classes" -keywords: "storage, volume, pv, pvc, storage class, csi, Ceph RBD, Glusterfs, QingCloud, " +keywords: "storage, volume, pv, pvc, storage class, csi, Ceph RBD, GlusterFS, QingCloud, " description: "Learn basic concepts of PVs, PVCs and storage classes, and demonstrate how to manage storage classes and PVCs in KubeSphere." linkTitle: "Persistent Volumes and Storage Classes" weight: 8400 --- -This tutorial describes the basic concepts of PVs, PVCs and storage classes and demonstrates how a cluster administrator can manage storage classes and persistent volumes in KubeSphere. +This tutorial describes the basic concepts of PVs, PVCs, and storage classes and demonstrates how a cluster administrator can manage storage classes and persistent volumes in KubeSphere. ## Introduction -A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. 
PVs can be provisioned either [statically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static) or [dynamically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic). +A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using storage classes. PVs are volume plugins like volumes, but have a lifecycle independent of any individual Pod that uses the PV. PVs can be provisioned either [statically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static) or [dynamically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic). A PersistentVolumeClaim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. KubeSphere supports [dynamic volume provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) based on storage classes to create PVs. -A [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes) provides a way for administrators to describe the classes of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. Each StorageClass has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. For which value to use, please read [the official Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) or check with your storage administrator. +A [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes) provides a way for administrators to describe the classes of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. 
Each storage class has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. For which value to use, please read [the official Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) or check with your storage administrator. The table below summarizes common volume plugins for various provisioners (storage systems). | Type | Description | | -------------------- | ------------------------------------------------------------ | -| In-tree | Built-in and run as part of Kubernetes, such as [RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) and [Glusterfs](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). For more plugins of this kind, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). | +| In-tree | Built-in and run as part of Kubernetes, such as [RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) and [GlusterFS](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). For more plugins of this kind, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). | | External-provisioner | Deployed independently from Kubernetes, but works like an in-tree plugin, such as [nfs-client](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client). For more plugins of this kind, see [External Storage](https://github.com/kubernetes-retired/external-storage). | | CSI | Container Storage Interface, a standard for exposing storage resources to workloads on COs (for example, Kubernetes), such as [QingCloud-csi](https://github.com/yunify/qingcloud-csi) and [Ceph-CSI](https://github.com/ceph/ceph-csi). For more plugins of this kind, see [Drivers](https://kubernetes-csi.github.io/docs/drivers.html). | ## Prerequisites -You need an account granted a role including the authorization of **Cluster Management**. 
For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account. +You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. ## Manage Storage Classes @@ -36,30 +36,25 @@ You need an account granted a role including the authorization of **Cluster Mana 2. If you have enabled the [multi-cluster feature](../../multicluster-management/) with member clusters imported, you can select a specific cluster. If you have not enabled the feature, refer to the next step directly. -3. On the **Cluster Management** page, go to **Storage Classes** under **Storage**, where you can create, update and delete a storage class. - - ![storage-class](/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg) +3. On the **Cluster Management** page, go to **Storage Classes** under **Storage**, where you can create, update, and delete a storage class. 4. To create a storage class, click **Create** and enter the basic information in the displayed dialog box. When you finish, click **Next**. -5. In KubeSphere, you can create storage classes for `QingCloud-CSI`, `Glusterfs`, and `Ceph RBD`. Alternatively, you can also create customized storage classes for other storage systems based on your needs. Select a type and click **Next**. - - ![create-storage-class-storage-system](/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png) - - ![create-storage-class-settings](/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png) +5. In KubeSphere, you can create storage classes for `QingCloud-CSI`, `GlusterFS`, and `Ceph RBD`. Alternatively, you can also create customized storage classes for other storage systems based on your needs. 
Select a type and click **Next**. ### Common settings -Some settings are commonly used and shared among storage classes. You can find them as dashboard properties on the console, which are also indicated by fields or annotations in the StorageClass manifest. You can see the manifest file in YAML format by enabling **Edit Mode** in the upper-right corner. +Some settings are commonly used and shared among storage classes. You can find them as dashboard parameters on the console, which are also indicated by fields or annotations in the StorageClass manifest. You can see the manifest file in YAML format by clicking **Edit YAML** in the upper-right corner. -Here are property descriptions of some commonly used fields in KubeSphere. +Here are parameter descriptions of some commonly used fields in KubeSphere. -| Property | Description | +| Parameter | Description | | :---- | :---- | -| Allow Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. When it is set to `true`, PVs can be configured to be expandable. For more information, see [Allow Volume Expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion). | -| Reclaiming Policy | Specified by `reclaimPolicy` in the manifest. It can be set to `Delete` or `Retain` (default). For more information, see [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy). | +| Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. When it is set to `true`, PVs can be configured to be expandable. For more information, see [Allow Volume Expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion). | +| Reclaim Policy | Specified by `reclaimPolicy` in the manifest. For more information, see [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy). | | Storage System | Specified by `provisioner` in the manifest. 
It determines what volume plugin is used for provisioning PVs. For more information, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). | -| Supported Access Mode | Specified by `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` in the manifest. It tells KubeSphere which [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) is supported. | +| Access Mode | Specified by `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` in the manifest. It tells KubeSphere which [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) is supported. | +| Volume Binding Mode | Specified by `volumeBindingMode` in the manifest. It determines what binding mode is used. **Delayed binding** means that a volume, after it is created, is bound to a volume instance when a Pod using this volume is created. **Immediate binding** means that a volume, after it is created, is immediately bound to a volume instance. | For other settings, you need to provide different information for different storage plugins, which, in the manifest, are always indicated under the field `parameters`. They will be described in detail in the sections below. You can also refer to [Parameters](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters) in the official documentation of Kubernetes. @@ -74,40 +69,40 @@ QingCloud CSI is a CSI plugin on Kubernetes for the storage service of QingCloud #### Settings -![storage-volume-qingcloud](/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.png) - -| Property | Description | +| Parameter | Description | | :---- | :---- | -| type | On the QingCloud platform, 0 represents high performance volumes. 2 represents high capacity volumes. 3 represents super high performance volumes. 5 represents Enterprise Server SAN. 100 represents standard volumes. 
200 represents enterprise SSD. | -| maxSize | The volume size upper limit. | -| stepSize | The volume size increment. | -| minSize | The volume size lower limit. | -| fsType | Filesystem type of the volume: ext3, ext4 (default), xfs. | -| tags | The ID of QingCloud Tag resource, split by commas. | +| Type | On QingCloud Public Cloud Platform, 0 means high performance volume; 2 high capacity volume; 3 ultra-high performance volume; 5 enterprise server SAN (NeonSAN); 100 standard volume; 200 enterprise SSD. | +| Maximum Size | Maximum size of the volume. | +| Step Size | Step size of the volume. | +| Minimum Size | Minimum size of the volume. | +| File System Type | Supports ext3, ext4, and XFS. The default type is ext4. | +| Tag | Add tags to the storage volume. Use commas to separate multiple tags. | For more information about storage class parameters, see [QingCloud-CSI user guide](https://github.com/yunify/qingcloud-csi/blob/master/docs/user-guide.md#set-storage-class). -### Glusterfs +### GlusterFS -Glusterfs is an in-tree storage plugin on Kubernetes, which means you don't need to install a volume plugin additionally. +GlusterFS is an in-tree storage plugin on Kubernetes, which means you don't need to install a volume plugin additionally. #### Prerequisites -The Glusterfs storage system has already been installed. See [GlusterFS Installation Documentation](https://www.gluster.org/install/) for more information. +The GlusterFS storage system has already been installed. See [GlusterFS Installation Documentation](https://www.gluster.org/install/) for more information. #### Settings -| Property | Description | +| Parameter | Description | | :---- | :---- | -| resturl | The Gluster REST service/Heketi service url which provision gluster volumes on demand. | -| clusterid | The ID of the cluster which will be used by Heketi when provisioning the volume. | -| restauthenabled | Gluster REST service authentication boolean that enables authentication to the REST server. 
| -| restuser | The Glusterfs REST service/Heketi user who has access to create volumes in the Glusterfs Trusted Pool. | -| secretNamespace, secretName | The Identification of Secret instance that contains user password to use when talking to Gluster REST service. | -| gidMin, gidMax | The minimum and maximum value of GID range for the StorageClass. | -| volumetype | The volume type and its parameters can be configured with this optional value. | +| REST URL | Heketi REST URL that provisions volumes, for example, <Heketi Service cluster IP Address>:<Heketi Service port number>. | +| Cluster ID | Gluster cluster ID. | +| REST Authentication | Gluster enables authentication to the REST server. | +| REST User | Username of Gluster REST service or Heketi service. | +| Secret Namespace/Secret Name | Namespace of the Heketi user secret. | +| Secret Name | Name of the Heketi user secret. | +| Minimum GID | Minimum GID of the volume. | +| Maximum GID | Maximum GID of the volume. | +| Volume Type | Type of volume. The value can be none, replicate:<Replicate count>, or disperse:<Data>:<Redundancy count>. If the volume type is not set, the default volume type is replicate:3. | -For more information about StorageClass parameters, see [Glusterfs in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). +For more information about storage class parameters, see [GlusterFS in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). ### Ceph RBD @@ -117,8 +112,6 @@ but the storage server must be installed before you create the storage class of As **hyperkube** images were [deprecated since 1.17](https://github.com/kubernetes/kubernetes/pull/85094), in-tree Ceph RBD may not work without **hyperkube**. Nevertheless, you can use [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) as a substitute, whose format is the same as in-tree Ceph RBD. 
The only different parameter is `provisioner` (i.e **Storage System** on the KubeSphere console). If you want to use rbd-provisioner, the value of `provisioner` must be `ceph.com/rbd` (Enter this value in **Storage System** in the image below). If you use in-tree Ceph RBD, the value must be `kubernetes.io/rbd`. -![storage-system](/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png) - #### Prerequisites - The Ceph server has already been installed. See [Ceph Installation Documentation](https://docs.ceph.com/en/latest/install/) for more information. @@ -126,19 +119,19 @@ Nevertheless, you can use [rbd provisioner](https://github.com/kubernetes-incuba #### Settings -| Property | Description | +| Parameter | Description | | :---- | :---- | -| monitors| The Ceph monitors, comma delimited. | -| adminId| The Ceph client ID that is capable of creating images in the pool. | -| adminSecretName| The Secret Name for `adminId`. | -| adminSecretNamespace| The namespace for `adminSecretName`. | -| pool | The Ceph RBD pool. | +| Monitors| IP address of Ceph monitors. | +| adminId| Ceph client ID that is capable of creating images in the pool. | +| adminSecretName| Secret name of `adminId`. | +| adminSecretNamespace| Namespace of `adminSecretName`. | +| pool | Name of the Ceph RBD pool. | | userId | The Ceph client ID that is used to map the RBD image. | | userSecretName | The name of Ceph Secret for `userId` to map RBD image. | | userSecretNamespace | The namespace for `userSecretName`. | -| fsType | The fsType that is supported by Kubernetes. | -| imageFormat | The Ceph RBD image format, `1` or `2`. | -| imageFeatures| This parameter is optional and should only be used if you set `imageFormat` to `2`. | +| File System Type | File system type of the storage volume. | +| imageFormat | Option of the Ceph volume. The value can be `1` or `2`. `imageFeatures` needs to be filled when you set imageFormat to `2`. 
| +| imageFeatures| Additional function of the Ceph cluster. The value should only be set when you set imageFormat to `2`. | For more information about StorageClass parameters, see [Ceph RBD in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd). @@ -164,14 +157,13 @@ It is not recommended that you use NFS storage for production (especially on Kub #### Common Settings -![custom-storage-class](/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.png) - -| Property | Description | +| Parameter | Description | | :---- | :---- | +| Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. Select `No`. | +| Reclaim Policy | Specified by `reclaimPolicy` in the manifest. The value is `Delete` by default. | | Storage System | Specified by `provisioner` in the manifest. If you install the storage class by [charts for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner), it can be `cluster.local/nfs-client-nfs-client-provisioner`. | -| Allow Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. Select `No`. | -| Reclaiming Policy | Specified by `reclaimPolicy` in the manifest. The value is `Delete` by default. | -| Supported Access Mode | Specified by `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` in the manifest. `ReadWriteOnce`, `ReadOnlyMany` and `ReadWriteMany` all are selected by default. | +| Access Mode | Specified by `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` in the manifest. `ReadWriteOnce`, `ReadOnlyMany` and `ReadWriteMany` are all selected by default. | +| Volume Binding Mode | Specified by `volumeBindingMode` in the manifest. It determines what binding mode is used. **Delayed binding** means that a volume, after it is created, is bound to a volume instance when a Pod using this volume is created. 
**Immediate binding** means that a volume, after it is created, is immediately bound to a volume instance. | #### Parameters @@ -179,6 +171,55 @@ It is not recommended that you use NFS storage for production (especially on Kub | :---- | :---- | :----| | archiveOnDelete | Archive pvc when deleting | `true` | +### Storage class details page + +After you create a storage class, click the name of the storage class to go to its details page. On the details page, click **Edit YAML** to edit the manifest file of the storage class, or click **More** to select an operation from the drop-down menu: + +- **Set as Default Storage Class**: Set the storage class as the default storage class in the cluster. Only one default storage class is allowed in a KubeSphere cluster. +- **Volume Management**: Manage volume features, including: **Volume Clone**, **Volume Snapshot**, and **Volume Expansion**. Before enabling any features, you should contact your system administrator to confirm that the features are supported by the storage system. +- **Delete**: Delete the storage class and return to the previous page. + +On the **Volumes** tab, view the volumes associated to the storage class. + ## Manage Volumes Once the storage class is created, you can create volumes with it. You can list, create, update and delete volumes in **Volumes** under **Storage** on the KubeSphere console. For more details, please see [Volume Management](../../project-user-guide/storage/volumes/). + +## Manage Volume Instances + +A volume in KubeSphere is a [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) in Kubernetes, and a volume instance is a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) in Kubernetes. + +### Volume instance list page + +1. Log in to KubeSphere web console as `admin`. Click **Platform** in the upper-left corner, select **Cluster Management**, and click **Volumes** under **Storage**. +2. 
Click the **Volume Instances** tab on the **Volumes** page to view the volume instance list page that provides the following information: + - **Name**: Name of the volume instance. It is specified by the field `.metadata.name` in the manifest file of the volume instance. + - **Status**: Current status of the volume instance. It is specified by the field `.status.phase` in the manifest file of the volume instance, including: + - **Available**: The volume instance is available and not yet bound to a volume. + - **Bound**: The volume instance is bound to a volume. + - **Terminating**: The volume instance is being deleted. + - **Failed**: The volume instance is unavailable. + - **Capacity**: Capacity of the volume instance. It is specified by the field `.spec.capacity.storage` in the manifest file of the volume instance. + - **Access Mode**: Access mode of the volume instance. It is specified by the field `.spec.accessModes` in the manifest file of the volume instance, including: + - **RWO**: The volume instance can be mounted as read-write by a single node. + - **ROX**: The volume instance can be mounted as read-only by multiple nodes. + - **RWX**: The volume instance can be mounted as read-write by multiple nodes. + - **Recycling Strategy**: Recycling strategy of the volume instance. It is specified by the field `.spec.persistentVolumeReclaimPolicy` in the manifest file of the volume instance, including: + - **Retain**: When a volume is deleted, the volume instance still exists and requires manual reclamation. + - **Delete**: Remove both the volume instance and the associated storage assets in the volume plugin infrastructure. + - **Recycle**: Erase the data on the volume instance and make it available again for a new volume. + - **Creation Time**: Time when the volume instance was created. +3. Click on the right of a volume instance and select an operation from the drop-down menu: + - **Edit**: Edit the YAML file of a volume instance. 
+ - **View YAML**: View the YAML file of the volume instance. + - **Delete**: Delete the volume instance. A volume instance in the **Bound** status cannot be deleted. + +### Volume instance details page + +1. Click the name of a volume instance to go to its details page. +2. On the details page, click **Edit Information** to edit the basic information of the volume instance. By clicking **More**, select an operation from the drop-down menu: + - **View YAML**: View the YAML file of the volume instance. + - **Delete**: Delete the volume instance and return to the list page. A volume instance in the **Bound** status cannot be deleted. +3. Click the **Resource Status** tab to view the volumes to which the volume instance is bound. +4. Click the **Metadata** tab to view the labels and annotations of the volume instance. +5. Click the **Events** tab to view the events of the volume instance. diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md index 6f847dab3..b63db2076 100644 --- a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md @@ -1,10 +1,38 @@ --- -title: "Configure DingTalk" -keywords: 'KubeSphere, Kubernetes, custom, platform' -description: '' -linkTitle: "Configure DingTalk" -weight: 8722 +title: "Configure DingTalk Notifications" +keywords: 'KubeSphere, Kubernetes, DingTalk, Alerting, Notification' +description: 'Learn how to configure a Dingtalk conversation or chatbot to receive platform notifications sent by KubeSphere.' +linkTitle: "Configure DingTalk Notifications" +weight: 8723 --- -TBD +[DingTalk](https://www.dingtalk.com/en) is an enterprise-grade communication and collaboration platform. 
It integrates messaging, conference calling, task management, and other features into a single application. + +This document describes how to configure a DingTalk conversation or chatbot to receive platform notifications sent by KubeSphere. + +## Prerequisites + +- You need to have a user with the `platform-admin` role, for example, the `admin` user. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to have a DingTalk account. +- You need to create an applet on [DingTalk Admin Panel](https://oa.dingtalk.com/index.htm#/microApp/microAppList) and make necessary configurations according to [DingTalk API documentation](https://developers.dingtalk.com/document/app/create-group-session). + +## Configure DingTalk Conversation or Chatbot + +1. Log in to the KubeSphere console as `admin`. +2. Click **Platform** in the upper-left corner and select **Platform Settings**. +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**. +4. On the **DingTalk** page, select the **Conversation Settings** tab and configure the following parameters: + - **AppKey**: The AppKey of the applet created on DingTalk. + - **AppSecret**: The AppSecret of the applet created on DingTalk. + - **Conversation ID**: The conversation ID obtained on DingTalk. To add a conversation ID, enter your conversation ID and click **Add** to add it. +5. (Optional) On the **DingTalk** page, select the **DingTalk Chatbot** tab and configure the following parameters: + - **Webhook URL**: The webhook URL of your DingTalk robot. + - **Secret**: The secret of your DingTalk robot. + - **Keywords**: The keywords you added to your DingTalk robot. To add a keyword, enter your keyword and click **Add** to add it. +6. To specify notification conditions, select the **Notification Conditions** checkbox. Specify a label, an operator, and values and click **Add** to add it. 
You will receive only notifications that meet the conditions. +7. After the configurations are complete, click **Send Test Message** to send a test message. +8. If you successfully receive the test message, click **OK** to save the configurations. +9. To enable DingTalk notifications, turn the toggle in the upper-right corner to **Enabled**. + + diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-email.md b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-email.md index 67556a013..afd6d98b0 100644 --- a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-email.md +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-email.md @@ -1,46 +1,67 @@ --- title: "Configure Email Notifications" keywords: 'KubeSphere, Kubernetes, custom, platform' -description: 'Configure your email server and add recipients to receive email notifications from alerting policies, kube-events, and kube-auditing.' +description: 'Configure an email server and add recipients to receive email notifications.' linkTitle: "Configure Email Notifications" -weight: 8721 +weight: 8722 --- -This tutorial demonstrates how to configure your email server and add recipients, who can receive email notifications for alerting policies. +This tutorial demonstrates how to configure an email server and add recipients to receive email notifications of alerting policies. ## Configure the Email Server -1. Log in to the web console with an account granted the role `platform-admin`. +1. Log in to the web console with a user granted the role `platform-admin`. -2. Click **Platform** in the top-left corner and select **Platform Settings**. +2. Click **Platform** in the upper-left corner and select **Platform Settings**. -3. Navigate to **Email** under **Notification Management**.
- - ![emai-server](/images/docs/cluster-administration/platform-settings/notification-management/configure-email/email-server.png) +3. Navigate to **Notification Configuration** under **Notification Management**, and then choose **Email**. 4. Under **Server Settings**, configure your email server by filling in the following fields. - - **SMTP Server Address**: The SMTP server address that can provide email services. The port is usually `25`. + - **SMTP Server Address**: The SMTP server address that provides email services. The port is usually `25`. - **Use SSL Secure Connection**: SSL can be used to encrypt emails, thereby improving the security of information transmitted by email. Usually you have to configure the certificate for the email server. - - **SMTP User**: The SMTP account. + - **SMTP Username**: The SMTP account. - **SMTP Password**: The SMTP account password. - - **Sender Email Address**: The sender's email address. Customized email addresses are currently not supported. + - **Sender Email Address**: The sender's email address. -5. Click **Save**. +5. Click **OK**. -## Add Recipients +## Recipient Settings + +### Add recipients 1. Under **Recipient Settings**, enter a recipient's email address and click **Add**. -2. After it is added, the email address of a recipient will be listed under **Recipient Settings**. You can add up to 50 recipients and all of them will be able to receive email notifications of alerts. +2. After it is added, the email address of a recipient will be listed under **Recipient Settings**. You can add up to 50 recipients and all of them will be able to receive email notifications. -3. To remove a recipient, hover over the email address you want to remove, then click the trash bin icon that appears. +3. To remove a recipient, hover over the email address you want to remove, then click . -4. To make sure notifications will be sent to your recipients, turn on **Receive Notifications** and click **Update**.
+### Set notification conditions + +1. Select the checkbox on the left of **Notification Conditions** to set notification conditions. + + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. + {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +2. You can click **Add** to add notification conditions. + +3. You can click on the right of a notification condition to delete the condition. + +4. After the configurations are complete, you can click **Send Test Message** for verification. + +5. On the upper-right corner, you can turn on the **Disabled** toggle to enable notifications, or turn off the **Enabled** toggle to disable them. {{< notice note >}} - If you change the existing configuration, you must click **Update** to apply it. + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. {{}} @@ -48,10 +69,6 @@ This tutorial demonstrates how to configure your email server and add recipients After you configure the email server and add recipients, you need to enable [KubeSphere Alerting](../../../../pluggable-components/alerting/) and create an alerting policy for workloads or nodes. Once it is triggered, all the recipients can receive email notifications.
-The image below is an email notification example: - -![example-email-notification](/images/docs/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png) - {{< notice note >}} - If you update your email server configuration, KubeSphere will send email notifications based on the latest configuration. diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-slack.md b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-slack.md index 0a2bca4cb..3a0c2d25c 100644 --- a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-slack.md +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-slack.md @@ -3,7 +3,7 @@ title: "Configure Slack Notifications" keywords: 'KubeSphere, Kubernetes, Slack, notifications' description: 'Configure Slack notifications and add channels to receive notifications from alerting policies, kube-events, and kube-auditing.' linkTitle: "Configure Slack Notifications" -weight: 8724 +weight: 8725 --- This tutorial demonstrates how to configure Slack notifications and add channels, which can receive notifications for alerting policies. @@ -24,24 +24,18 @@ You need to create a Slack app first so that it can help you send notifications 4. From the left navigation bar, select **OAuth & Permissions** under **Features**. On the **Auth & Permissions** page, scroll down to **Scopes** and click **Add an OAuth Scope** under **Bot Token Scopes** and **User Token Scopes** respectively. Select the **chart:write** permission for both scopes. - ![slack-scope](/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png) - 5. Scroll up to **OAuth Tokens & Redirect URLs** and click **Install to Workspace**. 
Grant the permission to access your workspace for the app and you can find created tokens under **OAuth Tokens for Your Team**. - ![oauth-token](/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png) - ## Configure Slack Notifications on the KubeSphere Console You must provide the Slack token on the console for authentication so that KubeSphere can send notifications to your channel. -1. Log in to the web console with an account granted the role `platform-admin`. +1. Log in to the web console with a user granted the role `platform-admin`. 2. Click **Platform** in the top-left corner and select **Platform Settings**. 3. Navigate to **Slack** under **Notification Management**. - ![slack-notification](/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png) - 4. For **Slack Token** under **Server Settings**, you can enter either a User OAuth Token or a Bot User OAuth Token for authentication. If you use the User OAuth Token, it is the app owner that will send notifications to your Slack channel. If you use the Bot User OAuth Token, it is the app that will send notifications. 5. Under **Channel Settings**, enter a Slack channel where you want to receive notifications and click **Add**. @@ -56,26 +50,39 @@ You must provide the Slack token on the console for authentication so that KubeS 7. Click **Save**. -8. To make sure notifications will be sent to a Slack channel, turn on **Receive Notifications** and click **Update**. +8. Select the checkbox on the left of **Notification Conditions** to set notification conditions. + + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. 
+ {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +9. You can click **Add** to add notification conditions, or click on the right of a notification condition to delete the condition. + +10. After the configurations are complete, you can click **Send Test Message** for verification. + +11. To make sure notifications will be sent to a Slack channel, turn on **Receive Notifications** and click **Update**. {{< notice note >}} - If you change the existing configuration, you must click **Update** to apply it. + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. {{}} 9. If you want the app to be the notification sender, make sure it is in the channel. To add it in a Slack channel, enter `/invite @` in your channel. - ![add-app](/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png) - ## Receive Slack Notifications After you configure Slack notifications and add channels, you need to enable [KubeSphere Alerting](../../../../pluggable-components/alerting/) and create an alerting policy for workloads or nodes. Once it is triggered, all the channels in the list can receive notifications. The image below is a Slack notification example: -![example-notification](/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/example-notification.png) - {{< notice note >}} - If you update your Slack notification configuration, KubeSphere will send notifications based on the latest configuration.
diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md index 87d40d072..12d352a1c 100644 --- a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md @@ -2,8 +2,8 @@ title: "Configure Webhook Notifications" keywords: 'KubeSphere, Kubernetes, custom, platform, webhook' description: 'Configure a webhook server to receive platform notifications through the webhook.' -linkTitle: "Configure Webhook notifications" -weight: 8725 +linkTitle: "Configure Webhook Notifications" +weight: 8726 --- A webhook is a way for an app to send notifications triggered by specific events. It delivers information to other applications in real time, allowing users to receive notifications immediately. @@ -12,7 +12,7 @@ This tutorial describes how to configure a webhook server to receive platform no ## Prerequisites -You need to prepare an account granted the `platform-admin` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../../quick-start/create-workspace-and-project/). +You need to prepare a user granted the `platform-admin` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). ## Configure the Webhook Server @@ -20,21 +20,44 @@ You need to prepare an account granted the `platform-admin` role. For more infor 2. Click **Platform** in the upper-left corner and select **Platform Settings**. -3. In the left nevigation pane, click **Webhook** under **Notification Management**. +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**, and select **Webhook**. -4. 
On the **Webhook** page, configure the following parameters: +4. On the **Webhook** tab page, set the following parameters: - - **URL**: URL of the webhook server. + - **Webhook URL**: URL of the webhook server. - **Verification Type**: Webhook authentication method. - - **No Auth**: Skips authentication. All notifications can be sent to the URL. - - **Bearer Token**: Uses a token for authentication. - - **Basic Auth**: Uses a username and password for authentication. + - **No authentication**: Skips authentication. All notifications can be sent to the URL. + - **Bearer token**: Uses a token for authentication. + - **Basic authentication**: Uses a username and password for authentication. - {{< notice note>}}Currently, KubeSphere does not suppot TLS connections (HTTPS). You need to select **Skip TLS Certification** if you use an HTTPS URL. + {{< notice note>}}Currently, KubeSphere does not support TLS connections (HTTPS). You need to select **Skip TLS verification (insecure)** if you use an HTTPS URL. {{}} -5. Under **Notification Settings**, turn on/off the **Receive Notifications** toggle to start/stop sending notifications to the webhook. +5. Select the checkbox on the left of **Notification Conditions** to set notification conditions. -6. Click **Save** after you finish. + - **Label**: Name, severity, or monitoring target of an alerting policy. You can select a label or customize a label. + - **Operator**: Mapping between the label and the values. The operator includes **Includes values**, **Does not include values**, **Exists**, and **Does not exist**. + - **Values**: Values associated with the label. + {{< notice note >}} + + - Operators **Includes values** and **Does not include values** require one or more label values. Use a carriage return to separate values. + - Operators **Exists** and **Does not exist** determine whether a label exists, and do not require a label value. + + {{}} + +6. 
You can click **Add** to add notification conditions, or click on the right of a notification condition to delete the condition. + +7. After the configurations are complete, you can click **Send Test Message** for verification. + +8. On the upper-right corner, you can turn on the **Disabled** toggle to enable notifications, or turn off the **Enabled** toggle to disable them. + +9. Click **OK** after you finish. + + {{< notice note >}} + + - After the notification conditions are set, the recipients will receive only notifications that meet the conditions. + - If you change the existing configuration, you must click **OK** to apply it. + + {{}} diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md index 2080fa9a8..0788a1b0b 100644 --- a/content/en/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md @@ -1,10 +1,33 @@ --- -title: "Configure WeCom" -keywords: 'KubeSphere, Kubernetes, custom, platform' -description: '' -linkTitle: "Configure WeCom" -weight: 8723 +title: "Configure WeCom Notifications" +keywords: 'KubeSphere, Kubernetes, WeCom, Alerting, Notification' +description: 'Learn how to configure a WeCom server to receive platform notifications sent by KubeSphere.' +linkTitle: "Configure WeCom Notifications" +weight: 8724 --- +[WeCom](https://work.weixin.qq.com/) is a communication platform for enterprises that includes convenient communication and office automation tools. +This document describes how to configure a WeCom server to receive platform notifications sent by KubeSphere. + +## Prerequisites + +- You need to have a user with the `platform-admin` role, for example, the `admin` user.
For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to have a [WeCom account](https://work.weixin.qq.com/wework_admin/register_wx?from=myhome). +- You need to create a self-built application on the [WeCom Admin Console](https://work.weixin.qq.com/wework_admin/loginpage_wx) and obtain its AgentId and Secret. + +## Configure WeCom Server + +1. Log in to the KubeSphere console as `admin`. +2. Click **Platform** in the upper-left corner and select **Platform Settings**. +3. In the left navigation pane, click **Notification Configuration** under **Notification Management**. +4. On the **WeCom** page, set the following fields under **Server Settings**: + - **Corporation ID**: The Corporation ID of your WeCom account. + - **App AgentId**: The AgentId of the self-built application. + - **App Secret**: The Secret of the self-built application. +5. To add notification recipients, select **User ID**, **Department ID**, or **Tag ID** under **Recipient Settings**, enter a corresponding ID obtained from your WeCom account, and click **Add** to add it. +6. To specify notification conditions, select the **Notification Conditions** checkbox. Specify a label, an operator, and values and click **Add** to add it. You will receive only notifications that meet the conditions. +7. After the configurations are complete, click **Send Test Message** to send a test message. +8. If you successfully receive the test message, click **OK** to save the configurations. +9. To enable WeCom notifications, turn the toggle in the upper-right corner to **Enabled**. 
diff --git a/content/en/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md b/content/en/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md new file mode 100644 index 000000000..e05508943 --- /dev/null +++ b/content/en/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md @@ -0,0 +1,40 @@ +--- +title: "Customize Cluster Name in Notification Messages" +keywords: 'KubeSphere, Kubernetes, Platform, Notification' +description: 'Learn how to customize cluster name in notification messages sent by KubeSphere.' +linkTitle: "Customize Cluster Name in Notification Messages" +weight: 8721 +--- + +This document describes how to customize your cluster name in notification messages sent by KubeSphere. + +## Prerequisites + +You need to have a user with the `platform-admin` role, for example, the `admin` user. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). + +## Customize Cluster Name in Notification Messages + +1. Log in to the KubeSphere console as `admin`. + +2. Click in the lower-right corner and select **Kubectl**. + +3. In the displayed dialog box, run the following command: + + ```bash + kubectl edit nm notification-manager + ``` + +4. Add a field `cluster` under `.spec.receiver.options.global` to customize your cluster name: + + ```yaml + spec: + receivers: + options: + global: + cluster: + ``` + +5. When you finish, save the changes. 
+ + + diff --git a/content/en/docs/devops-user-guide/examples/a-maven-project.md b/content/en/docs/devops-user-guide/examples/a-maven-project.md index 9f7e1f4d4..ca13e5092 100644 --- a/content/en/docs/devops-user-guide/examples/a-maven-project.md +++ b/content/en/docs/devops-user-guide/examples/a-maven-project.md @@ -1,6 +1,6 @@ --- title: "Build and Deploy a Maven Project" -keywords: 'kubernetes, docker, devops, jenkins, maven' +keywords: 'Kubernetes, Docker, DevOps, Jenkins, Maven' description: 'Learn how to build and deploy a Maven project using a KubeSphere pipeline.' linkTitle: "Build and Deploy a Maven Project" weight: 11430 @@ -10,7 +10,7 @@ weight: 11430 - You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/). - You need to have a [Docker Hub](http://www.dockerhub.com/) account. -- You need to create a workspace, a DevOps project, and a user account, and this account needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project, and a user account, and this user needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Workflow for a Maven Project @@ -57,43 +57,35 @@ The Pod labeled `maven` uses the docker-in-docker network to run the pipeline. N ### Prepare for the Maven project - Ensure you build the Maven project successfully on the development device. -- Add the Dockerfile to the project repository to build the image. For more information, refer to . -- Add the YAML file to the project repository to deploy the workload. For more information, refer to . If there are different environments, you need to prepare multiple deployment files. 
+- Add the Dockerfile to the project repository to build the image. For more information, refer to . +- Add the YAML file to the project repository to deploy the workload. For more information, refer to . If there are different environments, you need to prepare multiple deployment files. ### Create credentials | Credential ID | Type | Where to Use | | --------------- | ------------------- | ---------------------------- | -| dockerhub-id | Account Credentials | Registry, such as Docker Hub | +| dockerhub-id | Username and password | Registry, such as Docker Hub | | demo-kubeconfig | kubeconfig | Workload deployment | For details, refer to the [Credential Management](../../how-to-use/credential-management/). -![view-credential-list](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-credential-list.png) - ### Create a project for workloads In this example, all workloads are deployed in `kubesphere-sample-dev`. You must create the project `kubesphere-sample-dev` in advance. -![view-namespace](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-namespace.png) - ### Create a pipeline for the Maven project 1. Go to **Pipelines** of your DevOps project and click **Create** to create a pipeline named `maven`. For more information, see [Create a Pipeline - using Graphical Editing Panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel/). -2. Go to the detail page of the pipeline and click **Edit Jenkinsfile**. +2. Go to the details page of the pipeline and click **Edit Jenkinsfile**. - ![edit-jenkinsfile](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/edit-jenkinsfile.png) - -3. Copy and paste the following content into the pop-up window. You must replace the value of `DOCKERHUB_NAMESPACE` with yours. When you finish editing, click **OK** to save the Jenkinsfile. +3. Copy and paste the following content into the displayed dialog box. 
You must replace the value of `DOCKERHUB_NAMESPACE` with yours. When you finish editing, click **OK** to save the Jenkinsfile. ```groovy pipeline { - agent { - node { - label 'maven' + agent { + label 'maven' } - } parameters { string(name:'TAG_NAME',defaultValue: '',description:'') @@ -105,21 +97,23 @@ In this example, all workloads are deployed in `kubesphere-sample-dev`. You must REGISTRY = 'docker.io' // need to replace by yourself dockerhub namespace DOCKERHUB_NAMESPACE = 'Docker Hub Namespace' - APP_NAME = 'devops-java-sample' + APP_NAME = 'devops-maven-sample' BRANCH_NAME = 'dev' + PROJECT_NAME = 'kubesphere-sample-dev' } stages { stage ('checkout scm') { steps { - git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git" + // Please avoid committing your test changes to this repository + git branch: 'master', url: "https://github.com/kubesphere/devops-maven-sample.git" } } stage ('unit test') { steps { container ('maven') { - sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test' + sh 'mvn clean test' } } } @@ -127,7 +121,7 @@ In this example, all workloads are deployed in `kubesphere-sample-dev`. You must stage ('build & push') { steps { container ('maven') { - sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package' + sh 'mvn -Dmaven.test.skip=true clean package' sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' @@ -138,9 +132,17 @@ In this example, all workloads are deployed in `kubesphere-sample-dev`. 
You must } stage('deploy to dev') { - steps { - kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID") - } + steps { + container ('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } } } } @@ -148,22 +150,12 @@ In this example, all workloads are deployed in `kubesphere-sample-dev`. You must 4. You can see stages and steps are automatically created on graphical editing panels. - ![view-edit-jenkinsfile](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-edit-jenkinsfile.png) - ### Run and test -1. Click **Run**, enter `v1` for **TAG_NAME** in the dialog that appears, and then click **OK** to run the pipeline. +1. Click **Run**, enter `v1` for **TAG_NAME** in the displayed dialog box, and then click **OK** to run the pipeline. - ![run-maven-pipeline](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/run-maven-pipeline.png) +2. When the pipeline runs successfully, you can go to the **Run Records** tab to view its details. -2. When the pipeline runs successfully, you can go to the **Activity** tab to view its details. +3. In the project of `kubesphere-sample-dev`, new workloads were created. - ![view-result-maven-pipeline](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-pipeline.png) - -3. In the project of `kubesphere-sample-dev`, there are new workloads created. - - ![view-result-maven-workload](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload.png) - -4. You can view the access address of the Service as below. - - ![view-result-maven-workload-svc](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload-svc.png) +4. 
On the **Services** page, view the external access information about the Service created. diff --git a/content/en/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md b/content/en/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md index d9ea16cb2..6b212fcd9 100644 --- a/content/en/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md +++ b/content/en/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md @@ -12,11 +12,11 @@ This tutorial demonstrates how to create a multi-cluster pipeline on KubeSphere. ## Prerequisites -- You need to have three Kubernetes clusters with KubeSphere installed. Choose one cluster as your Host Cluster and the other two as your Member Clusters. For more information about cluster roles and how to build a multi-cluster environment on KubeSphere, refer to [Multi-cluster Management](../../../multicluster-management/). -- You need to set your Member Clusters as [public clusters](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#make-a-cluster-public). Alternatively, you can [set cluster visibility after a workspace is created](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#set-cluster-visibility-after-a-workspace-is-created). -- You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/) on your Host Cluster. +- You need to have three Kubernetes clusters with KubeSphere installed. Choose one cluster as your host cluster and the other two as your member clusters. For more information about cluster roles and how to build a multi-cluster environment on KubeSphere, refer to [Multi-cluster Management](../../../multicluster-management/). +- You need to set your member clusters as [public clusters](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#make-a-cluster-public). 
Alternatively, you can [set cluster visibility after a workspace is created](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#set-cluster-visibility-after-a-workspace-is-created). +- You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/) on your host cluster. - You need to integrate SonarQube into your pipeline. For more information, refer to [Integrate SonarQube into Pipelines](../../how-to-integrate/sonarqube/). -- You need to create four accounts on your Host Cluster: `ws-manager`, `ws-admin`, `project-admin`, and `project-regular`, and grant these accounts different roles. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-1-create-an-account). +- You need to create four accounts on your host cluster: `ws-manager`, `ws-admin`, `project-admin`, and `project-regular`, and grant these accounts different roles. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-1-create-an-account). ## Workflow Overview @@ -34,39 +34,31 @@ See the table below for the role of each cluster. | Cluster Name | Cluster Role | Usage | | ------------ | -------------- | ----------- | -| host | Host Cluster | Testing | -| shire | Member Cluster | Production | -| rohan | Member Cluster | Development | +| host | Host cluster | Testing | +| shire | Member cluster | Production | +| rohan | Member cluster | Development | {{< notice note >}} -These Kubernetes clusters can be hosted across different cloud providers and their Kubernetes versions can also vary. Recommended Kubernetes versions for KubeSphere v3.1.0: v1.17.9, v1.18.8, v1.19.8 and v1.20.4. +These Kubernetes clusters can be hosted across different cloud providers and their Kubernetes versions can also vary. 
Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x, and v1.22.x (experimental). {{}} ### Step 2: Create a workspace -1. Log in to the web console of the Host Cluster as `ws-manager`. On the **Workspaces** page, click **Create**. +1. Log in to the web console of the host cluster as `ws-manager`. On the **Workspaces** page, click **Create**. 2. On the **Basic Information** page, name the workspace `devops-multicluster`, select `ws-admin` for **Administrator**, and click **Next**. - ![create-workspace](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-workspace.png) +3. On the **Cluster Settings** page, select all three clusters and click **Create**. -3. On the **Select Clusters** page, select all three clusters and click **Create**. - - ![select-all-clusters](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/select-all-clusters.png) - -4. The workspace created will display in the list. You need to log out of the console and log back in as `ws-admin` to invite both `project-admin` and `project-regular` to the workspace and grant them the role `workspace-self-provisioner` and `workspace-viewer` respectively. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace). - - ![workspace-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/workspace-created.png) +4. The workspace created is displayed in the list. You need to log out of the console and log back in as `ws-admin` to invite both `project-admin` and `project-regular` to the workspace and grant them the role `workspace-self-provisioner` and `workspace-viewer` respectively. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace). ### Step 3: Create a DevOps project 1. 
Log out of the console and log back in as `project-admin`. Go to the **DevOps Projects** page and click **Create**. -2. In the dialog that appears, enter `multicluster-demo` for **Name**, select **host** for **Cluster Settings**, and then click **OK**. - - ![devops-project](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project.png) +2. In the displayed dialog box, enter `multicluster-demo` for **Name**, select **host** for **Cluster Settings**, and then click **OK**. {{< notice note >}} @@ -74,13 +66,11 @@ These Kubernetes clusters can be hosted across different cloud providers and the {{}} -3. The DevOps project created will display in the list. Make sure you invite the account `project-regular` to this project with the role `operator`. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-5-create-a-devops-project-optional). - - ![devops-project-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project-created.png) +3. The DevOps project created is displayed in the list. Make sure you invite the `project-regular` user to this project and assign it the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-5-create-a-devops-project-optional). ### Step 4: Create projects on clusters -You must create the projects as shown in the table below in advance. Make sure you invite the account `project-regular` to these projects with the role `operator`. For more information about how to create a project, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/#step-3-create-a-project). +You must create the projects as shown in the table below in advance. Make sure you invite the `project-regular` user to these projects and assign it the `operator` role. 
For more information about how to create a project, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/#step-3-create-a-project). | Cluster Name | Usage | Project Name | | ------------ | ----------- | ---------------------- | @@ -92,37 +82,31 @@ You must create the projects as shown in the table below in advance. Make sure y 1. Log out of the console and log back in as `project-regular`. On the **DevOps Projects** page, click the DevOps project `multicluster-demo`. -2. On the **DevOps Credentials** page, you need to create the credentials as shown in the table below. For more information about how to create credentials, refer to [Credential Management](../../how-to-use/credential-management/#create-credentials) and [Create a Pipeline Using a Jenkinsfile](../../how-to-use/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials). +2. On the **Credentials** page, you need to create the credentials as shown in the table below. For more information about how to create credentials, refer to [Credential Management](../../how-to-use/credential-management/#create-credentials) and [Create a Pipeline Using a Jenkinsfile](../../how-to-use/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials). | Credential ID | Type | Where to Use | | ------------- | ------------------- | ---------------------------------- | - | host | kubeconfig | The Host Cluster for testing | - | shire | kubeconfig | The Member Cluster for production | - | rohan | kubeconfig | The Member Cluster for development | + | host | kubeconfig | The host cluster for testing | + | shire | kubeconfig | The member cluster for production | + | rohan | kubeconfig | The member cluster for development | | dockerhub-id | Account Credentials | Docker Hub | | sonar-token | Secret Text | SonarQube | {{< notice note >}} - You have to manually enter the kubeconfig of your Member Clusters when creating the kubeconfig credentials `shire` and `rohan`. 
Make sure your Host Cluster can access the APIServer addresses of your Member Clusters. + You have to manually enter the kubeconfig of your member clusters when creating the kubeconfig credentials `shire` and `rohan`. Make sure your host cluster can access the API Server addresses of your member clusters. {{}} -3. You will have five credentials in total. - - ![credentials-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/credentials-created.png) +3. Five credentials are created in total. ### Step 6: Create a pipeline -1. Go to the **Pipelines** page and click **Create**. In the dialog that appears, enter `build-and-deploy-application` for **Name** and click **Next**. +1. Go to the **Pipelines** page and click **Create**. In the displayed dialog box, enter `build-and-deploy-application` for **Name** and click **Next**. - ![pipeline-name](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-name.png) +2. On the **Advanced Settings** tab, click **Create** to use the default settings. -2. In the **Advanced Settings** tab, click **Create** to use the default settings. - -3. The pipeline created will display in the list. Click it to go to its detail page. - - ![pipeline-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-created.png) +3. The pipeline created is displayed in the list. Click its name to go to the details page. 4. Click **Edit Jenkinsfile** and copy and paste the following contents. Make sure you replace the value of `DOCKERHUB_NAMESPACE` with your own value, and then click **OK**. @@ -145,7 +129,7 @@ You must create the projects as shown in the table below in advance. 
Make sure y REGISTRY = 'docker.io' DOCKERHUB_NAMESPACE = 'your Docker Hub account ID' - APP_NAME = 'devops-java-sample' + APP_NAME = 'devops-maven-sample' SONAR_CREDENTIAL_ID = 'sonar-token' TAG_NAME = "SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER" } @@ -153,16 +137,15 @@ You must create the projects as shown in the table below in advance. Make sure y stage('checkout') { steps { container('maven') { - git branch: 'master', url: 'https://github.com/kubesphere/devops-java-sample.git' + git branch: 'master', url: 'https://github.com/kubesphere/devops-maven-sample.git' } } } stage('unit test') { steps { container('maven') { - sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test' + sh 'mvn clean test' } - } } stage('sonarqube analysis') { @@ -170,7 +153,7 @@ You must create the projects as shown in the table below in advance. Make sure y container('maven') { withCredentials([string(credentialsId: "$SONAR_CREDENTIAL_ID", variable: 'SONAR_TOKEN')]) { withSonarQubeEnv('sonar') { - sh "mvn sonar:sonar -o -gs `pwd`/configuration/settings.xml -Dsonar.login=$SONAR_TOKEN" + sh "mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN" } } @@ -181,15 +164,13 @@ You must create the projects as shown in the table below in advance. Make sure y stage('build & push') { steps { container('maven') { - sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package' + sh 'mvn -Dmaven.test.skip=true clean package' sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' 
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' } - } - } } stage('push latest') { @@ -198,29 +179,51 @@ You must create the projects as shown in the table below in advance. Make sure y sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' } - } } stage('deploy to dev') { steps { - kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$DEV_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.DEV_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/dev-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } stage('deploy to staging') { steps { - input(id: 'deploy-to-staging', message: 'deploy to staging?') - kubernetesDeploy(configs: 'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$TEST_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + input(id: 'deploy-to-staging', message: 'deploy to staging?') + withCredentials([ + kubeconfigFile( + credentialsId: env.TEST_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } stage('deploy to production') { steps { - input(id: 'deploy-to-production', message: 'deploy to production?') - kubernetesDeploy(configs: 'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$PROD_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + input(id: 'deploy-to-production', message: 'deploy to production?') + withCredentials([ + kubeconfigFile( + 
credentialsId: env.PROD_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } } } - ``` {{< notice note >}} @@ -231,33 +234,18 @@ You must create the projects as shown in the table below in advance. Make sure y 5. After the pipeline is created, you can view its stages and steps on the graphical editing panel as well. - ![pipeline-panel](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-panel.png) - ### Step 7: Run the pipeline and check the results 1. Click **Run** to run the pipeline. The pipeline will pause when it reaches the stage **deploy to staging** as resources have been deployed to the cluster for development. You need to manually click **Proceed** twice to deploy resources to the testing cluster `host` and the production cluster `shire`. - ![deploy-to-staging](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/deploy-to-staging.png) +2. After a while, you can see the pipeline status shown as **Successful**. -2. After a while, you can see the pipeline status shown as **Success**. +3. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis. - ![pipeline-success](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-success.png) +4. Once the pipeline runs successfully, click **Code Check** to check the results through SonarQube. -3. Check the pipeline running logs by clicking **Show Logs** in the upper-right corner. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis. +5. Go to the **Projects** page, and you can view the resources deployed in different projects across the clusters by selecting a specific cluster from the drop-down list. 
- ![pipeline-logs](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-logs.png) - -4. Once the pipeline runs successfully, click **Code Quality** to check the results through SonarQube. - - ![sonarqube-result](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/sonarqube-result.png) - -5. Go to the **Projects** page and you can view the resources deployed in different projects across the clusters by selecting a specific cluster from the drop-down list. - - ![host-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/host-pods.png) - - ![shire-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/shire-pods.png) - - ![rohan-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/rohan-pods.png) diff --git a/content/en/docs/devops-user-guide/examples/customize-jenkins-agent.md b/content/en/docs/devops-user-guide/examples/customize-jenkins-agent.md new file mode 100644 index 000000000..b1ed79c48 --- /dev/null +++ b/content/en/docs/devops-user-guide/examples/customize-jenkins-agent.md @@ -0,0 +1,70 @@ +--- +title: "Customize Jenkins Agent" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, Agent" +description: "Learn how to customize a Jenkins agent on KubeSphere." +linkTitle: "Customize Jenkins Agent" +Weight: 11460 +--- + +If you need to use a Jenkins agent that runs on a specific environment, for example, JDK 11, you can customize a Jenkins agent on KubeSphere. + +This document describes how to customize a Jenkins agent on KubeSphere. + +## Prerequisites + +- You have enabled [the KubeSphere DevOps System](../../../pluggable-components/devops/). + +## Customize a Jenkins agent + +1. Log in to the web console of KubeSphere as `admin`. + +2. Click **Platform** in the upper-left corner, select **Cluster Management**, and click **Configmaps** under **Configuration** on the left navigation pane. + +3. 
On the **Configmaps** page, enter `jenkins-casc-config` in the search box and press **Enter**. + +4. Click `jenkins-casc-config` to go to its details page, click **More**, and select **Edit YAML**. + +5. In the displayed dialog box, enter the following code under the `data.jenkins_user.yaml:jenkins.clouds.kubernetes.templates` section and click **OK**. + + ```yaml + - name: "maven-jdk11" # The name of the customized Jenkins agent. + label: "maven jdk11" # The label of the customized Jenkins agent. To specify multiple labels, use spaces to separate them. + inheritFrom: "maven" # The name of the existing pod template from which this customized Jenkins agent inherits. + containers: + - name: "maven" # The container name specified in the existing pod template from which this customized Jenkins agent inherits. + image: "kubespheredev/builder-maven:v3.2.0jdk11" # This image is used for testing purposes only. You can use your own images. + ``` + + {{< notice note >}} + + Make sure you follow the indentation in the YAML file. + + {{}} + +6. Wait for at least 70 seconds until your changes are automatically reloaded. + +7. To use the custom Jenkins agent, refer to the following sample Jenkinsfile to specify the label and container name of the custom Jenkins agent accordingly when creating a pipeline.
+ + ```groovy + pipeline { + agent { + node { + label 'maven && jdk11' + } + } + stages { + stage('Print Maven and JDK version') { + steps { + container('maven') { + sh ''' + mvn -v + java -version + ''' + } + } + } + } + } + ``` + + diff --git a/content/en/docs/devops-user-guide/examples/go-project-pipeline.md b/content/en/docs/devops-user-guide/examples/go-project-pipeline.md index f64fc1813..59ab66dbc 100644 --- a/content/en/docs/devops-user-guide/examples/go-project-pipeline.md +++ b/content/en/docs/devops-user-guide/examples/go-project-pipeline.md @@ -1,6 +1,6 @@ --- title: "Build and Deploy a Go Project" -keywords: 'Kubernetes, docker, devops, jenkins, go, KubeSphere' +keywords: 'Kubernetes, docker, DevOps, Jenkins, Go, KubeSphere' description: 'Learn how to build and deploy a Go project using a KubeSphere pipeline.' linkTitle: "Build and Deploy a Go Project" weight: 11410 @@ -10,37 +10,25 @@ weight: 11410 - You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/). - You need to have a [Docker Hub](https://hub.docker.com/) account. -- You need to create a workspace, a DevOps project, a project, and an account (`project-regular`). This account needs to be invited to the DevOps project and the project for deploying your workload with the role `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project, a project, and a user (`project-regular`). This account needs to be invited to the DevOps project and the project for deploying your workload with the role `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a Docker Hub Access Token -1. Log in to [Docker Hub](https://hub.docker.com/) and select **Account Settings** from the menu in the top-right corner. +1. 
Log in to [Docker Hub](https://hub.docker.com/), click your account in the upper-right corner, and select **Account Settings** from the menu. - ![dockerhub-settings](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg) +2. Click **Security** in the left navigation pane and then click **New Access Token**. -2. Click **Security** and **New Access Token**. +3. In the displayed dialog box, enter a token name (`go-project-token`) and click **Create**. - ![dockerhub-create-token](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg) - -3. Enter the token name and click **Create**. - - ![dockerhub-token-ok](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg) - -4. Click **Copy and Close** and remember to save the access token. - - ![dockerhub-token-copy](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg) +4. Click **Copy and Close** and make sure you save the access token. ## Create Credentials You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for imaging pushing. Besides, you also create kubeconfig credentials for the access to the Kubernetes cluster. -1. Log in to the web console of KubeSphere as `project-regular`. Go to your DevOps project and click **Create** in **Credentials**. +1. Log in to the web console of KubeSphere as `project-regular`. In your DevOps project, go to **Credentials** under **DevOps Project Settings** and then click **Create** on the **Credentials** page. - ![create-dockerhub-id](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.png) - -2. In the dialog that appears, set a **Credential ID**, which will be used later in the Jenkinsfile, and select **Account Credentials** for **Type**. 
Enter your Docker Hub account name for **Username** and the access token just created for **Token/Password**. When you finish, click **OK**. - - ![credential-docker-create](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg) +2. In the displayed dialog box, set a **Name**, which is used later in the Jenkinsfile, and select **Username and password** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Password/Token**. When you finish, click **OK**. {{< notice tip >}} @@ -48,9 +36,7 @@ For more information about how to create credentials, see [Credential Management {{}} -3. Click **Create** again and select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current user account. Set a **Credential ID** and click **OK**. - - ![create-kubeconfig](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg) +3. Click **Create** again and select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current user account. Set a **Name** and click **OK**. ## Create a Pipeline @@ -58,32 +44,22 @@ With the above credentials ready, you can create a pipeline using an example Jen 1. To create a pipeline, click **Create** on the **Pipelines** page. - ![create-pipeline](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.png) +2. Set a name in the displayed dialog box and click **Next**. -2. Set a name in the pop-up window and click **Next** directly. - - ![set-pipeline-name](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.png) - -3. In this tutorial, you can use default values for all the fields. In **Advanced Settings**, click **Create** directly. 
- - ![create-pipeline-2](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.png) +3. In this tutorial, you can use default values for all the fields. On the **Advanced Settings** tab, click **Create**. ## Edit the Jenkinsfile -1. In the pipeline list, click this pipeline to go to its detail page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. +1. In the pipeline list, click the pipeline name to go to its details page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. - ![edit-jenkinsfile](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.png) +2. Copy and paste all the content below to the displayed dialog box as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, and `PROJECT_NAME` with yours. When you finish, click **OK**. -2. Copy and paste all the content below to the pop-up window as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, and `PROJECT_NAME` with yours. When you finish, click **OK**. 
- - ```groovy - pipeline { + ```groovy + pipeline { agent { - node { - label 'maven' - } + label 'go' } - + environment { // the address of your Docker Hub registry REGISTRY = 'docker.io' @@ -91,7 +67,7 @@ With the above credentials ready, you can create a pipeline using an example Jen DOCKERHUB_USERNAME = 'Docker Hub Username' // Docker image name APP_NAME = 'devops-go-sample' - // ‘dockerhubid’ is the credentials ID you created in KubeSphere with Docker Hub Access Token + // 'dockerhubid' is the credentials ID you created in KubeSphere with Docker Hub Access Token DOCKERHUB_CREDENTIAL = credentials('dockerhubid') // the kubeconfig credentials ID you created in KubeSphere KUBECONFIG_CREDENTIAL_ID = 'go' @@ -102,31 +78,37 @@ With the above credentials ready, you can create a pipeline using an example Jen stages { stage('docker login') { steps{ - container ('maven') { + container ('go') { sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' - } - } - } - - stage('build & push') { - steps { - container ('maven') { - sh 'git clone https://github.com/yuswift/devops-go-sample.git' - sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' - sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' - } - } - } - stage ('deploy app') { - steps { - container('maven') { - kubernetesDeploy(configs: 'devops-go-sample/manifest/deploy.yaml', kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID") - } } } } - } - ``` + + stage('build & push') { + steps { + container ('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' 
+ sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + stage ('deploy app') { + steps { + container ('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/deploy.yaml | kubectl apply -f -' + } + } + } + } + } + } + ``` {{< notice note >}} @@ -136,25 +118,15 @@ If your pipeline runs successfully, images will be pushed to Docker Hub. If you ## Run the Pipeline -1. After you finish the Jenkinsfile, you can see graphical panels display on the dashboard. Click **Run** to run the pipeline. +1. After you finish the Jenkinsfile, you can see graphical panels are displayed on the dashboard. Click **Run** to run the pipeline. - ![run-pipeline](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.png) - -2. In **Activity**, you can see the status of the pipeline. It may take a while before it successfully runs. - - ![pipeline-running](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.png) +2. In **Run Records**, you can see the status of the pipeline. It may take a while before it successfully runs. ## Verify Results -1. A **Deployment** will be created in the project specified in the Jenkinsfile if the pipeline runs successfully. +1. A **Deployment** is created in the project specified in the Jenkinsfile if the pipeline runs successfully. - ![view-deployments](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.png) - -2. Check whether the image is pushed to Docker Hub as shown below: - - ![docker-image-1](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg) - - ![docker-image-2](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg) +2. Check the image that is pushed to Docker Hub. 
\ No newline at end of file diff --git a/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md b/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md index da499b405..9bc5a1dce 100644 --- a/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md +++ b/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md @@ -10,39 +10,27 @@ weight: 11420 - You need to [enable the multi-cluster feature](../../../../docs/multicluster-management/) and create a workspace with your multiple clusters. - You need to have a [Docker Hub](https://hub.docker.com/) account. -- You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/) on your Host Cluster. -- You need to use an account (for example, `project-admin`) with the role of `workspace-self-provisioner` to create a multi-cluster project and a DevOps project on the Host Cluster. This tutorial creates a multi-cluster project on the Host Cluster and one Member Cluster. -- You need to invite an account (for example, `project-regular`) to the DevOps project and grant it the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/), [Multi-cluster Management](../../../multicluster-management/) and [Multi-cluster Projects](../../../project-administration/project-and-multicluster-project/#multi-cluster-projects). +- You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/) on your host cluster. +- You need to use a user (for example, `project-admin`) with the role of `workspace-self-provisioner` to create a multi-cluster project and a DevOps project on the host cluster. This tutorial creates a multi-cluster project on the host cluster and one member cluster. +- You need to invite a user (for example, `project-regular`) to the DevOps project and grant it the role of `operator`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/), [Multi-cluster Management](../../../multicluster-management/) and [Multi-cluster Projects](../../../project-administration/project-and-multicluster-project/#multi-cluster-projects). ## Create a Docker Hub Access Token -1. Log in to [Docker Hub](https://hub.docker.com/) and select **Account Settings** from the menu in the top-right corner. +1. Log in to [Docker Hub](https://hub.docker.com/), click your account in the upper-right corner, and select **Account Settings** from the menu. - ![dockerhub-settings](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg) +2. Click **Security** in the left navigation pane and then click **New Access Token**. -2. Click **Security** and **New Access Token**. +3. In the displayed dialog box, enter a token name (`go-project-token`) and click **Create**. - ![dockerhub-create-token](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg) - -3. Enter the token name and click **Create**. - - ![dockerhub-token-ok](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg) - -4. Click **Copy and Close** and remember to save the access token. - - ![dockerhub-token-copy](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg) +4. Click **Copy and Close** and make sure you save the access token. ## Create Credentials You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for pushing images. Besides, you also need to create kubeconfig credentials for the access to the Kubernetes cluster. -1. Log in to the web console of KubeSphere as `project-regular`. Go to your DevOps project and click **Create** in **Credentials**. +1. 
Log in to the web console of KubeSphere as `project-regular`. In your DevOps project, go to **Credentials** under **DevOps Project Settings** and then click **Create** on the **Credentials** page. - ![create-dockerhub-id](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.png) - -2. In the dialog that appears, set a **Credential ID**, which will be used later in the Jenkinsfile, and select **Account Credentials** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Token/Password**. When you finish, click **OK**. - - ![credential-docker-create](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg) +2. In the displayed dialog box, set a **Name**, which is used later in the Jenkinsfile, and select **Username and password** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Password/Token**. When you finish, click **OK**. {{< notice tip >}} @@ -50,9 +38,7 @@ You need to create credentials in KubeSphere for the access token created so tha {{}} -3. Log out of the KubeSphere web console and log back in as `project-admin`. Go to your DevOps project and click **Create** in **Credentials**. Select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current account. Set a **Credential ID** and click **OK**. - - ![create-kubeconfig](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg) +3. Log out of the KubeSphere web console and log back in as `project-admin`. Go to your DevOps project and click **Create** in **Credentials**. Select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current account. Set a **Name** and click **OK**. 
{{< notice note >}} @@ -62,35 +48,24 @@ You need to create credentials in KubeSphere for the access token created so tha ## Create a Pipeline -With the above credentials ready, you can use the account `project-regular` to create a pipeline with an example Jenkinsfile as below. +With the above credentials ready, you can use the user `project-regular` to create a pipeline with an example Jenkinsfile as below. 1. To create a pipeline, click **Create** on the **Pipelines** page. - ![create-pipeline](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.png) +2. Set a name in the displayed dialog box and click **Next**. -2. Set a name in the pop-up window and click **Next** directly. - - ![set-pipeline-name](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.png) - -3. In this tutorial, you can use default values for all the fields. In **Advanced Settings**, click **Create** directly. - - ![create-pipeline-2](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.png) +3. In this tutorial, you can use default values for all the fields. On the **Advanced Settings** tab, click **Create**. ## Edit the Jenkinsfile -1. In the pipeline list, click this pipeline to go to its detail page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. +1. In the pipeline list, click this pipeline to go to its details page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it. - ![edit-jenkinsfile](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.png) - -2. Copy and paste all the content below to the pop-up window as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, `MULTI_CLUSTER_PROJECT_NAME`, and `MEMBER_CLUSTER_NAME` with yours. When you finish, click **OK**. +2. 
Copy and paste all the content below to the displayed dialog box as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, `MULTI_CLUSTER_PROJECT_NAME`, and `MEMBER_CLUSTER_NAME` with yours. When you finish, click **OK**. ```groovy pipeline { agent { - node { - label 'maven' - } - + label 'go' } environment { @@ -98,31 +73,30 @@ With the above credentials ready, you can use the account `project-regular` to c // Docker Hub username DOCKERHUB_USERNAME = 'Your Docker Hub username' APP_NAME = 'devops-go-sample' - // ‘dockerhub-go’ is the Docker Hub credentials ID you created on the KubeSphere console - DOCKERHUB_CREDENTIAL = credentials('dockerhub-go') + // 'dockerhub' is the Docker Hub credentials ID you created on the KubeSphere console + DOCKERHUB_CREDENTIAL = credentials('dockerhub') // the kubeconfig credentials ID you created on the KubeSphere console - KUBECONFIG_CREDENTIAL_ID = 'dockerhub-go-kubeconfig' + KUBECONFIG_CREDENTIAL_ID = 'kubeconfig' // mutli-cluster project name under your own workspace MULTI_CLUSTER_PROJECT_NAME = 'demo-multi-cluster' - // the name of the Member Cluster where you want to deploy your app - // in this tutorial, the apps are deployed on Host Cluster and only one Member Cluster - // for more Member Clusters, please edit manifest/multi-cluster-deploy.yaml - MEMBER_CLUSTER_NAME = 'Your Member Cluster name' + // the name of the member cluster where you want to deploy your app + // in this tutorial, the apps are deployed on host cluster and only one member cluster + // for more member clusters, please edit manifest/multi-cluster-deploy.yaml + MEMBER_CLUSTER_NAME = 'Your member cluster name' } stages { stage('docker login') { steps { - container('maven') { + container('go') { sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' } - } } stage('build & push') { steps { - container('maven') { + container('go')
{ sh 'git clone https://github.com/yuswift/devops-go-sample.git' sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' @@ -132,17 +106,15 @@ With the above credentials ready, you can use the account `project-regular` to c stage('deploy app to multi cluster') { steps { - container('maven') { - script { + container('go') { withCredentials([ kubeconfigFile( - credentialsId: 'dockerhub-go-kubeconfig', + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, variable: 'KUBECONFIG') ]) { sh 'envsubst < devops-go-sample/manifest/multi-cluster-deploy.yaml | kubectl apply -f -' } - } - } + } } } } @@ -158,5 +130,3 @@ With the above credentials ready, you can use the account `project-regular` to c ## Run the Pipeline After you save the Jenkinsfile, click **Run**. If everything goes well, you will see the Deployment workload in your multi-cluster project. - -![multi-cluster-ok](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png) \ No newline at end of file diff --git a/content/en/docs/devops-user-guide/examples/use-nexus-in-pipelines.md b/content/en/docs/devops-user-guide/examples/use-nexus-in-pipelines.md index b7bbe8771..14333d8e9 100644 --- a/content/en/docs/devops-user-guide/examples/use-nexus-in-pipelines.md +++ b/content/en/docs/devops-user-guide/examples/use-nexus-in-pipelines.md @@ -15,7 +15,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. - You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/). - You need to [prepare a Nexus instance](https://help.sonatype.com/repomanager3/installation). - You need to have a [GitHub](https://github.com/) account. -- You need to create a workspace, a DevOps project (for example, `demo-devops`), and an account (for example, `project-regular`). This account needs to be invited into the DevOps project with the role of `operator`. 
For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project (for example, `demo-devops`), and a user (for example, `project-regular`). This account needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab @@ -29,11 +29,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. - `hosted`: the repository storing artifacts on Nexus. - `group`: a group of configured Nexus repositories. - ![repo-type](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png) - -3. You can click a repository to view its details. For example, click **maven-public** to go to its detail page and you can see its URL. - - ![maven-public-url](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png) +3. You can click a repository to view its details. For example, click **maven-public** to go to its details page, and you can see its **URL**. ### Step 2: Modify `pom.xml` in your GitHub repository @@ -41,9 +37,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. 2. In your own GitHub repository of **learn-pipeline-java**, click the file `pom.xml` in the root directory. - ![click-pom](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png) - -3. Click to modify the code segment of `` in the file. Set the `` and use the URLs of your own Nexus repositories . +3. Click to modify the code segment of `` in the file. Set the `` and use the URLs of your own Nexus repositories. ![modify-pom](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/modify-pom.png) @@ -53,13 +47,9 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. 1. 
Log in to the KubeSphere web console as `admin`, click **Platform** in the upper-left corner, and select **Cluster Management**. -2. Select **ConfigMaps** under **Configurations**. On the **ConfigMaps** page, select `kubesphere-devops-system` from the drop-down list and click `ks-devops-agent`. +2. Select **ConfigMaps** under **Configuration**. On the **ConfigMaps** page, select `kubesphere-devops-worker` from the drop-down list and click `ks-devops-agent`. - ![ks-devops-agent](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png) - -3. On the detail page, click **Edit YAML** from the **More** drop-down menu. - - ![click-edit-yaml](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png) +3. On the details page, click **Edit YAML** from the **More** drop-down menu. 4. In the displayed dialog box, scroll down, find the code segment of ``, and enter the following code: @@ -102,7 +92,7 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. {{}} -6. When you finish, click **Update**. +6. When you finish, click **OK**. ### Step 4: Create a pipeline @@ -110,13 +100,9 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. 2. On the **Basic Information** tab, set a name for the pipeline (for example, `nexus-pipeline`) and click **Next**. - ![set-pipeline-name](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png) - 3. On the **Advanced Settings** tab, click **Create** to use the default settings. -4. Click the pipeline to go to its detail page and click **Edit Jenkinsfile**. - - ![click-edit-jenkinsfile](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png) +4. Click the pipeline name to go to its details page and click **Edit Jenkinsfile**. 5. In the displayed dialog box, enter the Jenkinsfile as follows. When you finish, click **OK**. 
@@ -158,8 +144,6 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. } ``` - ![enter-jenkinsfile](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png) - {{< notice note >}} You need to replace the GitHub repository address with your own. In the command from the step in the stage `deploy to Nexus`, `nexus` is the name you set in `` in the ConfigMap and `http://135.68.37.85:8081/repository/maven-snapshots/` is the URL of your Nexus repository. @@ -170,15 +154,9 @@ This tutorial demonstrates how to use Nexus in pipelines on KubeSphere. 1. You can see all the stages and steps shown on the graphical editing panels. Click **Run** to run the pipeline. - ![click-run](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png) +2. After a while, you can see the pipeline status shown as **Successful**. Click the **Successful** record to see its details. -2. After a while, you can see the pipeline status shown as **Success**. Click the **Success** record to see its details. - - ![pipeline-success](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png) - -3. You can click **Show Logs** to view the detailed logs. - - ![pipeline-logs](/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png) +3. You can click **View Logs** to view the detailed logs. 4. Log in to Nexus and click **Browse**. Click **maven-public** and you can see all the dependencies have been downloaded. diff --git a/content/en/docs/devops-user-guide/how-to-integrate/harbor.md b/content/en/docs/devops-user-guide/how-to-integrate/harbor.md index 650e7497a..150f4d529 100644 --- a/content/en/docs/devops-user-guide/how-to-integrate/harbor.md +++ b/content/en/docs/devops-user-guide/how-to-integrate/harbor.md @@ -11,7 +11,7 @@ This tutorial demonstrates how to integrate Harbor into KubeSphere pipelines. 
## Prerequisites - You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). -- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This user needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. ## Install Harbor @@ -26,25 +26,15 @@ helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL ## Get Harbor Credentials -1. After Harbor is installed, visit `NodeIP:30002` and log in to the console with the default account and password (`admin/Harbor12345`). Go to **Projects** and click **NEW PROJECT**. +1. After Harbor is installed, visit `:30002` and log in to the console with the default account and password (`admin/Harbor12345`). Click **Projects** in the left navigation pane and click **NEW PROJECT** on the **Projects** page. - ![harbor-projects](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/harbor-projects.jpg) +2. In the displayed dialog box, set a name (`ks-devops-harbor`) and click **OK**. -2. Set a name (`ks-devops-harbor`) and click **OK**. +3. Click the project you just created, and click **NEW ROBOT ACCOUNT** under the **Robot Accounts** tab. - ![set-name](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/set-name.png) +4. In the displayed dialog box, set a name (`robot-test`) for the robot account and click **SAVE**. Make sure you select the checkbox for pushing artifact in **Permissions**. -3.
Click the project you just created, and select **NEW ROBOT ACCOUNT** in **Robot Accounts**. - - ![robot-account](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account.png) - -4. Set a name (`robot-test`) for the robot account and save it. - - ![robot-account-name](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account-name.png) - -5. Click **EXPORT TO FILE** to save the token. - - ![export-to-file](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/export-to-file.png) +5. In the displayed dialog box, click **EXPORT TO FILE** to save the token. ## Enable Insecure Registry @@ -79,13 +69,9 @@ You have to configure Docker to disregard security for your Harbor registry. ## Create Credentials -1. Log in to KubeSphere as `project-regular`, go to your DevOps project and create credentials for Harbor in **Credentials** under **Project Management**. +1. Log in to KubeSphere as `project-regular`, go to your DevOps project and create credentials for Harbor in **Credentials** under **DevOps Project Settings**. - ![create-credentials](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-credentials.png) - -2. On the **Create Credentials** page, set a credential ID (`robot-test`) and select **Account Credentials** for **Type**. The **Username** field must be the same as the value of `name` in the JSON file you just downloaded and enter the value of `token` in the file for **Token/Password**. - - ![credentials-page](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/credentials-page.png) +2. On the **Create Credentials** page, set a credential ID (`robot-test`) and select **Username and password** for **Type**. The **Username** field must be the same as the value of `name` in the JSON file you just downloaded and enter the value of `token` in the file for **Password/Token**. 3. Click **OK** to save it. 
@@ -93,17 +79,11 @@ You have to configure Docker to disregard security for your Harbor registry. 1. Go to the **Pipelines** page and click **Create**. In the **Basic Information** tab, enter a name (`demo-pipeline`) for the pipeline and click **Next**. - ![basic-info](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/basic-info.png) - 2. Use default values in **Advanced Settings** and click **Create**. - ![advanced-settings](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/advanced-settings.png) - ## Edit the Jenkinsfile -1. Click the pipeline to go to its detail page and click **Edit Jenkinsfile**. - - ![edit-jenkinsfile](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/edit-jenkinsfile.png) +1. Click the pipeline to go to its details page and click **Edit Jenkinsfile**. 2. Copy and paste the following contents into the Jenkinsfile. Note that you must replace the values of `REGISTRY`, `HARBOR_NAMESPACE`, `APP_NAME`, and `HARBOR_CREDENTIAL` with your own values. @@ -160,6 +140,5 @@ You have to configure Docker to disregard security for your Harbor registry. ## Run the Pipeline -Save the Jenkinsfile and KubeSphere automatically creates all stages and steps on the graphical editing panel. Click **Run** to run the pipeline. If everything goes well, the image will be pushed to your Harbor registry by Jenkins. +Save the Jenkinsfile and KubeSphere automatically creates all stages and steps on the graphical editing panel. Click **Run** to run the pipeline. If everything goes well, the image is pushed to your Harbor registry by Jenkins. 
-![image-pushed](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/image-pushed.png) diff --git a/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md b/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md index 454de6913..c7d9bcccd 100644 --- a/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md +++ b/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md @@ -1,6 +1,6 @@ --- title: "Integrate SonarQube into Pipelines" -keywords: 'Kubernetes, KubeSphere, devops, jenkins, sonarqube, pipeline' +keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins, SonarQube, Pipeline' description: 'Integrate SonarQube into your pipeline for code quality analysis.' linkTitle: "Integrate SonarQube into Pipelines" weight: 11310 @@ -79,19 +79,15 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir ```bash $ kubectl get pod -n kubesphere-devops-system NAME READY STATUS RESTARTS AGE - ks-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m + devops-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m s2ioperator-0 1/1 Running 1 84m sonarqube-postgresql-0 1/1 Running 0 5m31s sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s ``` -2. Access the SonarQube console `http://{$Node IP}:{$NodePort}` in your browser and you can see its homepage as below: +2. Access the SonarQube console `http://:` in your browser. - ![access-sonarqube-console](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/access-sonarqube-console.jpg) - -3. Click **Log in** in the top-right corner and use the default account `admin/admin`. - - ![log-in-page](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/log-in-page.jpg) +3. Click **Log in** in the upper-right corner and log in as the default account `admin/admin`. {{< notice note >}} @@ -124,7 +120,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir 1. 
Execute the following command to get the address of SonarQube Webhook. ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/ ``` @@ -143,7 +139,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir ![sonarqube-webhook-3](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-webhook-3.jpg) -5. Enter **Name** and **Jenkins Console URL** (i.e. the SonarQube Webhook address) in the dialog that appears. Click **Create** to finish. +5. Enter **Name** and **Jenkins Console URL** (that is, the SonarQube Webhook address) in the displayed dialog box. Click **Create** to finish. ![webhook-page-info](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/webhook-page-info.jpg) @@ -178,7 +174,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir 1. Execute the following command to get the address of Jenkins. ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT ``` @@ -189,9 +185,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir http://192.168.0.4:30180 ``` -3. Access Jenkins with the address `http://{$Public IP}:30180`.
When KubeSphere is installed, the Jenkins dashboard is also installed by default. Besides, Jenkins is configured with KubeSphere LDAP, which means you can log in to Jenkins with KubeSphere accounts (for example, `admin/P@88w0rd`) directly. For more information about configuring Jenkins, see [Jenkins System Settings](../../../devops-user-guide/how-to-use/jenkins-setting/). - - ![jenkins-login-page](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/jenkins-login-page.jpg) +3. Access Jenkins with the address `http://:30180`. When KubeSphere is installed, the Jenkins dashboard is also installed by default. Besides, Jenkins is configured with KubeSphere LDAP, which means you can log in to Jenkins with KubeSphere accounts (for example, `admin/P@88w0rd`) directly. For more information about configuring Jenkins, see [Jenkins System Settings](../../../devops-user-guide/how-to-use/jenkins-setting/). {{< notice note >}} @@ -199,19 +193,13 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir {{}} -4. Click **Manage Jenkins** on the left. - - ![manage-jenkins](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/manage-jenkins.png) +4. Click **Manage Jenkins** on the left navigation pane. 5. Scroll down to **Configure System** and click it. - ![configure-system](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/configure-system.png) - 6. Navigate to **SonarQube servers** and click **Add SonarQube**. - ![add-sonarqube](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/add-sonarqube.png) - -7. Enter **Name** and **Server URL** (`http://{$Node IP}:{$NodePort}`). Click **Add**, select **Jenkins**, and then create the credentials with the SonarQube admin token in the dialog that appears as shown in the second image below. 
After adding the credentials, select it from the drop-down list for **Server authentication token** and then click **Apply** to finish. +7. Enter **Name** and **Server URL** (`http://<Node IP>:<NodePort>`). Click **Add**, select **Jenkins**, and then create the credentials with the SonarQube admin token in the displayed dialog box as shown in the second image below. After adding the credentials, select it from the drop-down list for **Server authentication token** and then click **Apply** to finish. ![sonarqube-jenkins-settings](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-jenkins-settings.png) @@ -233,14 +221,10 @@ You need to specify `sonarqubeURL` so that you can access SonarQube directly fro kubectl edit cm -n kubesphere-system ks-console-config ``` -2. Navigate to `client` and add the field `devops` with `sonarqubeURL` specified. +2. Go to `data.client.enableKubeConfig` and add the field `devops` with `sonarqubeURL` specified under it. ```bash client: - version: - kubesphere: v3.0.0 - kubernetes: v1.17.9 - openpitrix: v0.3.5 enableKubeConfig: true devops: # Add this field manually. sonarqubeURL: http://192.168.0.4:31434 # The SonarQube IP address. @@ -253,7 +237,7 @@ You need to specify `sonarqubeURL` so that you can access SonarQube directly fro Execute the following commands. ```bash -kubectl -n kubesphere-system rollout restart deploy ks-apiserver +kubectl -n kubesphere-devops-system rollout restart deploy devops-apiserver ``` ```bash @@ -286,6 +270,4 @@ You need a SonarQube token so that your pipeline can communicate with SonarQube ## View Results on the KubeSphere Console -After you [create a pipeline using the graphical editing panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel/) or [create a pipeline using a Jenkinsfile](../../how-to-use/create-a-pipeline-using-jenkinsfile/), you can view the result of code quality analysis. For example, you may see an image as below if SonarQube runs successfully.
- -![sonarqube-view-result](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-view-result.jpg) +After you [create a pipeline using the graphical editing panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel/) or [create a pipeline using a Jenkinsfile](../../how-to-use/create-a-pipeline-using-jenkinsfile/), you can view the result of code quality analysis. diff --git a/content/en/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md b/content/en/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md index 7f5428551..1dd2a13f4 100644 --- a/content/en/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md +++ b/content/en/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md @@ -1,18 +1,18 @@ --- title: "Choose Jenkins Agent" -keywords: 'Kubernetes, KubeSphere, docker, devops, jenkins, agent' +keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, Agent' description: 'Specify the Jenkins agent and use the built-in podTemplate for your pipeline.' linkTitle: "Choose Jenkins Agent" weight: 11250 --- -The `agent` section specifies where the entire Pipeline, or a specific stage, will execute in the Jenkins environment depending on where the `agent` section is placed. The section must be defined at the top-level inside the `pipeline` block, but stage-level usage is optional. For more information, see [the official documentation of Jenkins](https://www.jenkins.io/doc/book/pipeline/syntax/#agent). +The `agent` section specifies where the entire Pipeline, or a specific stage, will execute in the Jenkins environment depending on where the `agent` section is placed. The section must be defined at the top-level inside the `pipeline` block, but stage-level usage is optional. For more information, see [the official documentation of Jenkins](https://www.jenkins.io/doc/book/pipeline/syntax/#agent). ## Built-in podTemplate A podTemplate is a template of a Pod that is used to create agents.
Users can define a podTemplate to use in the Kubernetes plugin. -As a pipeline runs, every Jenkins agent Pod must have a container named `jnlp` for communications between the Jenkins master and Jenkins agent. In addition, users can add containers in the podTemplate to meet their own needs. They can choose to use their own Pod YAML to flexibly control the runtime, and the container can be switched by the `container` command. Here is an example. +As a pipeline runs, every Jenkins agent Pod must have a container named `jnlp` for communications between the Jenkins controller and Jenkins agent. In addition, users can add containers in the podTemplate to meet their own needs. They can choose to use their own Pod YAML to flexibly control the runtime, and the container can be switched by the `container` command. Here is an example. ```groovy pipeline { diff --git a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md index 86a466ab1..393ba5238 100644 --- a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md +++ b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md @@ -14,7 +14,7 @@ This tutorial demonstrates how to create a pipeline through graphical editing pa - You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/). - You need to have a [Docker Hub](http://www.dockerhub.com/) account. -- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account must be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. +- You need to create a workspace, a DevOps project, and a user (`project-regular`). 
This user must be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. - Set CI dedicated nodes to run the pipeline. For more information, see [Set CI Node for Dependency Cache](../set-ci-node/). - Configure your email server for pipeline notifications (optional). For more information, see [Set Email Server for KubeSphere Pipelines](../../how-to-use/jenkins-email/). - Configure SonarQube to include code analysis as part of the pipeline (optional). For more information, see [Integrate SonarQube into Pipelines](../../../devops-user-guide/how-to-integrate/sonarqube/). @@ -40,7 +40,7 @@ This example pipeline includes the following six stages. ### Step 1: Create credentials -1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **Project Management**. For more information about how to create credentials, see [Credential Management](../credential-management/). +1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**. For more information about how to create credentials, see [Credential Management](../credential-management/). {{< notice note >}} @@ -48,38 +48,28 @@ This example pipeline includes the following six stages. {{}} - | Credential ID | Type | Where to use | - | --------------- | ------------------- | ------------ | - | dockerhub-id | Account Credentials | Docker Hub | - | demo-kubeconfig | kubeconfig | Kubernetes | + | Credential ID | Type | Where to use | + | --------------- | --------------------- | ------------ | + | dockerhub-id | Username and password | Docker Hub | + | demo-kubeconfig | kubeconfig | Kubernetes | -2. 
You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (Code analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project) to use the token for the **secret** field below. Click **OK** to finish. - - ![sonar-token](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonar-token.png) +2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (Code analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project) to enter your SonarQube token in the **Token** field for a credential of the **Access token** type. Click **OK** to finish. 3. In total, you have three credentials in the list. - ![credential-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/credential-list.png) - ### Step 2: Create a project -In this tutorial, the example pipeline will deploy the [sample](https://github.com/kubesphere/devops-java-sample/tree/sonarqube) app to a project. Hence, you must create the project (for example, `kubesphere-sample-dev`) in advance. The Deployment and Service of the app will be created automatically in the project once the pipeline runs successfully. +In this tutorial, the example pipeline will deploy the [sample](https://github.com/kubesphere/devops-maven-sample/tree/sonarqube) app to a project. Hence, you must create the project (for example, `kubesphere-sample-dev`) in advance. The Deployment and Service of the app will be created automatically in the project once the pipeline runs successfully. -You can use the account `project-admin` to create the project. Besides, this account is also the reviewer of the CI/CD pipeline. 
Make sure the account `project-regular` is invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You can use the user `project-admin` to create the project. Besides, this user is also the reviewer of the CI/CD pipeline. Make sure the account `project-regular` is invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ### Step 3: Create a pipeline 1. Make sure you have logged in to KubeSphere as `project-regular`, and then go to your DevOps project. Click **Create** on the **Pipelines** page. - ![create-pipeline](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/create-pipeline.png) +2. In the displayed dialog box, name it `graphical-pipeline` and click **Next**. -2. In the dialog that appears, name it `graphical-pipeline` and click **Next**. - - ![basic-info](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/basic-info.png) - -3. On the **Advanced Settings** page, click **Add Parameter** to add three string parameters as follows. These parameters will be used in the Docker command of the pipeline. Click **Create** when you finish adding. - - ![add-parameter](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/add-parameter.png) +3. On the **Advanced Settings** page, click **Add** to add three string parameters as follows. These parameters will be used in the Docker command of the pipeline. Click **Create** when you finish adding. | Parameter Type | Name | Value | Description | | -------------- | ------------------- | --------------- | ------------------------------------------------------------ | @@ -93,30 +83,20 @@ You can use the account `project-admin` to create the project. 
Besides, this acc {{}} -4. The pipeline created appears in the list. - - ![pipeline-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-list.png) +4. The pipeline created is displayed in the list. ### Step 4: Edit the pipeline -Click the pipeline to go to its detail page. To use graphical editing panels, click **Edit Pipeline** under the tab **Pipeline**. In the dialog that appears, click **Custom Pipeline**. This pipeline consists of six stages. Follow the steps below to set each stage. - -![edit-pipeline](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/edit-pipeline.png) +Click the pipeline to go to its details page. To use graphical editing panels, click **Edit Pipeline** under the tab **Task Status**. In the displayed dialog box, click **Custom Pipeline**. This pipeline consists of six stages. Follow the steps below to set each stage. {{< notice note >}} -The pipeline detail page shows **Sync Status**. It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Success** icon if the synchronization is successful. You can also click **Edit Jenkinsfile** to create a Jenkinsfile manually for your pipeline. +- The pipeline details page shows **Sync Status**. It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Successful** icon if the synchronization is successful. You can also click **Edit Jenkinsfile** to create a Jenkinsfile manually for your pipeline. + +- You can also click **Continuous Integration (CI)** and **Continuous Integration & Delivery (CI/CD)** to [use the built-in pipeline templates](../use-pipeline-templates/) provided by KubeSphere. 
{{}} -![click-custom-pipeline](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/click-custom-pipeline.png) - -{{< notice note >}} - -You can also click **Continuous Integration (CI)** and **Continuous Integration & Delivery (CI/CD)** to [use the built-in pipeline templates](../use-pipeline-templates/) provided by KubeSphere. - -{{}} - #### Stage 1: Pull source code (Checkout SCM) A graphical editing panel includes two areas - **canvas** on the left and **content** on the right. It automatically generates a Jenkinsfile based on how you configure different stages and steps, which is much more user-friendly for developers. @@ -141,10 +121,10 @@ Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeli ![edit-panel](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/edit-panel.png) -3. Click **Add Step**. Select **git** from the list as the example code is pulled from GitHub. In the dialog that appears, fill in the required field. Click **OK** to finish. +3. Click **Add Step**. Select **git** from the list as the example code is pulled from GitHub. In the displayed dialog box, fill in the required field. Click **OK** to finish. - - **URL**. Enter the GitHub repository address `https://github.com/kubesphere/devops-java-sample.git`. Note that this is an example and you need to use your own repository address. - - **Credential ID**. You do not need to enter the Credential ID for this tutorial. + - **URL**. Enter the GitHub repository address `https://github.com/kubesphere/devops-maven-sample.git`. Note that this is an example and you need to use your own repository address. + - **Name**. You do not need to enter the Credential ID for this tutorial. - **Branch**. It defaults to the master branch if you leave it blank. Enter `sonarqube` or leave it blank if you do not need the code analysis stage. 
![enter-repo-url](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/enter-repo-url.png) @@ -166,7 +146,7 @@ Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeli 3. Click **Add Nesting Steps** to add a nested step under the `maven` container. Select **shell** from the list and enter the following command in the command line. Click **OK** to save it. ```shell - mvn clean -o -gs `pwd`/configuration/settings.xml test + mvn clean -gs `pwd`/configuration/settings.xml test ``` {{< notice note >}} @@ -175,9 +155,6 @@ Pipelines include [declarative pipelines](https://www.jenkins.io/doc/book/pipeli {{}} - ![shell](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell.png) - - ![unit-test-set](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/unit-test-set.png) #### Stage 3: Code analysis (optional) @@ -191,7 +168,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![maven-container](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/maven-container.png) -3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Click **withCredentials** and select the SonarQube token (`sonar-token`) from the **Credential ID** list. Enter `SONAR_TOKEN` for **Text Variable**, then click **OK**. +3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Click **withCredentials** and select the SonarQube token (`sonar-token`) from the **Name** list. Enter `SONAR_TOKEN` for **Text Variable**, then click **OK**. ![sonarqube-credentials](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-credentials.png) @@ -199,7 +176,7 @@ This stage uses SonarQube to test your code. 
You can skip this stage if you do n ![nested-step](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/nested-step.png) -5. Click **withSonarQubeEnv**. In the dialog that appears, do not change the default name `sonar` and click **OK** to save it. +5. Click **withSonarQubeEnv**. In the displayed dialog box, do not change the default name `sonar` and click **OK** to save it. ![sonar](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonar.png) @@ -210,7 +187,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n 7. Click **shell** and enter the following command in the command line for the sonarqube branch and authentication. Click **OK** to finish. ```shell - mvn sonar:sonar -o -gs `pwd`/configuration/settings.xml -Dsonar.login=$SONAR_TOKEN + mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN ``` ![sonarqube-shell-new](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-shell-new.png) @@ -221,7 +198,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![timeout](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/timeout.png) -9. Click **Add Nesting Steps** for the **timeout** step and select **waitForQualityGate**. Select **Start the follow-up task after the inspection** in the pop-up dialog. Click **OK** to save it. +9. Click **Add Nesting Steps** for the **timeout** step and select **waitForQualityGate**. Select **Start the follow-up task after the inspection** in the displayed dialog box. Click **OK** to save it. ![waitforqualitygate](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/waitforqualitygate.png) @@ -237,15 +214,15 @@ This stage uses SonarQube to test your code. 
You can skip this stage if you do n ![maven-set](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/maven-set.png) -3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Select **shell** from the list, and enter the following command in the pop-up window. Click **OK** to finish. +3. Click **Add Nesting Steps** under the `maven` container to add a nested step. Select **shell** from the list, and enter the following command in the displayed dialog box. Click **OK** to finish. ```shell - mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package + mvn -Dmaven.test.skip=true clean package ``` ![nested-step-maven](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/nested-step-maven.png) -4. Click **Add Nesting Steps** again and select **shell**. Enter the following command in the command line to build a Docker image based on the [Dockerfile](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Dockerfile-online). Click **OK** to confirm. +4. Click **Add Nesting Steps** again and select **shell**. Enter the following command in the command line to build a Docker image based on the [Dockerfile](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Dockerfile-online). Click **OK** to confirm. {{< notice note >}} @@ -259,9 +236,9 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![shell-command](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell-command.png) -5. Click **Add Nesting Steps** again and select **withCredentials**. Fill in the following fields in the dialog. Click **OK** to confirm. +5. Click **Add Nesting Steps** again and select **withCredentials**. Fill in the following fields in the displayed dialog box. Click **OK** to confirm. - - **Credential ID**: Select the Docker Hub credentials you created, such as `dockerhub-id`. 
+ - **Credential Name**: Select the Docker Hub credentials you created, such as `dockerhub-id`. - **Password Variable**: Enter `DOCKER_PASSWORD`. - **Username Variable**: Enter `DOCKER_USERNAME`. @@ -273,7 +250,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![docker-credential](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/docker-credential.png) -6. Click **Add Nesting Steps** (the first one) in the **withCredentials** step created above. Select **shell** and enter the following command in the pop-up window, which is used to log in to Docker Hub. Click **OK** to confirm. +6. Click **Add Nesting Steps** (the first one) in the **withCredentials** step created above. Select **shell** and enter the following command in the displayed dialog box, which is used to log in to Docker Hub. Click **OK** to confirm. ```shell echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin @@ -295,7 +272,7 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![add-artifact-stage](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/add-artifact-stage.png) -2. With the **Artifacts** stage selected, click **Add Step** under **Task** and select **archiveArtifacts**. Enter `target/*.jar` in the dialog, which is used to set the archive path of artifacts in Jenkins. Click **OK** to finish. +2. With the **Artifacts** stage selected, click **Add Step** under **Task** and select **archiveArtifacts**. Enter `target/*.jar` in the displayed dialog box, which is used to set the archive path of artifacts in Jenkins. Click **OK** to finish. ![artifact-info](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/artifact-info.png) @@ -311,18 +288,27 @@ This stage uses SonarQube to test your code. 
You can skip this stage if you do n {{< notice note >}} - In KubeSphere v3.1, the account that can run a pipeline will be able to continue or terminate the pipeline if there is no reviewer specified. Pipeline creators, accounts with the role of `admin` in a project, or the account you specify will be able to continue or terminate a pipeline. + In KubeSphere 3.2.x, the account that can run a pipeline will be able to continue or terminate the pipeline if there is no reviewer specified. Pipeline creators, accounts with the role of `admin` in a project, or the account you specify will be able to continue or terminate a pipeline. {{}} -3. Click **Add Step** under the **Deploy to Dev** stage again. Select **kubernetesDeploy** from the list and fill in the following fields in the dialog. Click **OK** to save it. +3. Click **Add Step** under the **Deploy to Dev** stage again. Select **container** from the list, name it `maven`, and click **OK**. - - **Kubeconfig**: Select the Kubeconfig you created, such as `demo-kubeconfig`. - - **Configuration File Path**: Enter `deploy/no-branch-dev/**`, which is the relative path of the Kubernetes resource [YAML](https://github.com/kubesphere/devops-java-sample/tree/sonarqube/deploy/no-branch-dev) file in the code repository. +4. Click **Add Nesting Steps** in the `maven` container step. Select **withCredentials** from the list, fill in the following fields in the displayed dialog box, and click **OK**. - ![kubernetesDeploy](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/kubernetesDeploy.png) + - **Credential Name**: Select the kubeconfig credential you created, such as `demo-kubeconfig`. + - **Kubeconfig Variable**: Enter `KUBECONFIG_CONTENT`. -4. If you want to receive email notifications when the pipeline runs successfully, click **Add Step** and select **mail** to add email information. 
Note that configuring the email server is optional, which means you can still run your pipeline if you skip this step. +5. Click **Add Nesting Steps** in the **withCredentials** step. Select **shell** from the list, enter the following commands in the displayed dialog box, and click **OK**. + + ```shell + mkdir ~/.kube + echo "$KUBECONFIG_CONTENT" > ~/.kube/config + envsubst < deploy/dev-ol/devops-sample-svc.yaml | kubectl apply -f - + envsubst < deploy/dev-ol/devops-sample.yaml | kubectl apply -f - + ``` + +6. If you want to receive email notifications when the pipeline runs successfully, click **Add Step** and select **mail** to add email information. Note that configuring the email server is optional, which means you can still run your pipeline if you skip this step. {{< notice note >}} @@ -330,13 +316,11 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n {{}} -5. When you finish the steps above, click **Confirm** and **Save** in the bottom-right corner. You can see the pipeline now has a complete workflow with each stage clearly listed on the pipeline. When you define a pipeline using the graphical editing panel, KubeSphere automatically creates its corresponding Jenkinsfile. Click **Edit Jenkinsfile** to view the Jenkinsfile. +7. When you finish the steps above, click **Save** in the lower-right corner. You can see the pipeline now has a complete workflow with each stage clearly listed on the pipeline. When you define a pipeline using the graphical editing panel, KubeSphere automatically creates its corresponding Jenkinsfile. Click **Edit Jenkinsfile** to view the Jenkinsfile. - ![pipeline-done](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-done.png) - {{< notice note >}} - On the **Pipelines** page, you can click the three dots on the right side of the pipeline and then select **Copy Pipeline** to create a copy of it. 
If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. + On the **Pipelines** page, you can click on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. {{}} @@ -346,25 +330,21 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n ![run-pipeline](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/run-pipeline.png) -2. To see the status of a pipeline, go to the **Activity** tab and click the record you want to view. +2. To see the status of a pipeline, go to the **Run Records** tab and click the record you want to view. 3. Wait for a while and the pipeline stops at the stage **Deploy to Dev** if it runs successfully. As the reviewer of the pipeline, `project-admin` needs to approve it before resources are deployed to the development environment. ![pipeline-successful](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-successful.jpg) -4. Log out of KubeSphere and log back in to the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Activity** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**. +4. Log out of KubeSphere and log back in to the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Run Records** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**. ### Step 6: View pipeline details -1. Log in to the console as `project-regular`. Go to your DevOps project and click the pipeline `graphical-pipeline`. 
Under the **Activity** tab, click the record marked with **Success** under **Status**. +1. Log in to the console as `project-regular`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Run Records** tab, click the record marked with **Successful** under **Status**. 2. If everything runs successfully, you can see that all stages are completed. - ![complete](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/complete.png) - -3. Click **Show Logs** in the top-right corner to inspect all the logs. Click each stage to see detailed logs of it. You can debug any problems based on the logs which also can be downloaded locally for further analysis. - - ![inspect-logs](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/inspect-logs.png) +3. Click **View Logs** in the upper-right corner to inspect all the logs. Click each stage to see detailed logs of it. You can debug any problems based on the logs which also can be downloaded locally for further analysis. ### Step 7: Download the artifact @@ -374,7 +354,7 @@ Click the **Artifacts** tab and then click the icon on the right to download the ### Step 8: View code analysis results -On the **Code Quality** page, view the code analysis result of this example pipeline, which is provided by SonarQube. If you do not configure SonarQube in advance, this section is not available. For more information, see [Integrate SonarQube into Pipelines](../../../devops-user-guide/how-to-integrate/sonarqube/). +On the **Code Check** page, view the code analysis result of this example pipeline, which is provided by SonarQube. If you do not configure SonarQube in advance, this section is not available. For more information, see [Integrate SonarQube into Pipelines](../../../devops-user-guide/how-to-integrate/sonarqube/). 
![sonarqube-result-detail](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/sonarqube-result-detail.png) @@ -384,14 +364,8 @@ On the **Code Quality** page, view the code analysis result of this example pipe 2. Go to the project (for example, `kubesphere-sample-dev` in this tutorial), click **Workloads** under **Application Workloads**, and you can see the Deployment appears in the list. - ![view-deployment](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/view-deployment.png) - 3. In **Services**, you can find the port number of the example Service is exposed through a NodePort. To access the Service, visit `:`. - ![service-exposed](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/service-exposed.png) - - ![access-service](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/access-service.jpg) - {{< notice note >}} You may need to configure port forwarding rules and open the port in your security group before you access the Service. @@ -402,7 +376,7 @@ On the **Code Quality** page, view the code analysis result of this example pipe ![dockerhub-image](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/dockerhub-image.png) -5. The app is named `devops-sample` as it is the value of `APP_NAME` and the tag is the value of `SNAPSHOT-$BUILD_NUMBER`. `$BUILD_NUMBER` is the serial number of a record under the **Activity** tab. +5. The app is named `devops-sample` as it is the value of `APP_NAME` and the tag is the value of `SNAPSHOT-$BUILD_NUMBER`. `$BUILD_NUMBER` is the serial number of a record under the **Run Records** tab. 6. If you set the email server and add the email notification step in the final stage, you can also receive the email message. 
diff --git a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md index e40238d63..82887121e 100644 --- a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md +++ b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md @@ -1,6 +1,6 @@ --- -title: "Create a Jenkins Pipeline Using a Jenkinsfile" -keywords: 'KubeSphere, Kubernetes, docker, spring boot, Jenkins, devops, ci/cd, pipeline' +title: "Create a Pipeline Using a Jenkinsfile" +keywords: 'KubeSphere, Kubernetes, Docker, Spring Boot, Jenkins, DevOps, CI/CD, Pipeline' description: "Learn how to create and run a pipeline by using an example Jenkinsfile." linkTitle: "Create a Pipeline Using a Jenkinsfile" weight: 11210 @@ -20,7 +20,7 @@ Two types of pipelines can be created in KubeSphere: Pipelines created based on - You need to have a [Docker Hub](https://hub.docker.com/) account and a [GitHub](https://github.com/) account. - You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/). -- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This user needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. - You need to set a CI dedicated node for running pipelines. Refer to [Set a CI Node for Dependency Caching](../../how-to-use/set-ci-node/). - You need to install and configure SonarQube. 
Refer to [Integrate SonarQube into Pipeline](../../../devops-user-guide/how-to-integrate/sonarqube/). If you skip this part, there is no **SonarQube Analysis** below. @@ -33,9 +33,9 @@ There are eight stages as shown below in this example pipeline. {{< notice note >}} - **Stage 1. Checkout SCM**: Check out source code from the GitHub repository. -- **Stage 2. Unit test**: It will not proceed with the next stage unit the test is passed. +- **Stage 2. Unit test**: It will not proceed with the next stage until the test is passed. - **Stage 3. SonarQube analysis**: The SonarQube code quality analysis. -- **Stage 4.** **Build & push snapshot image**: Build the image based on selected branches in **Behavioral strategy**. Push the tag of `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` to Docker Hub, the `$BUILD_NUMBER` of which is the operation serial number in the pipeline's activity list. +- **Stage 4. Build & push snapshot image**: Build the image based on selected branches in **Strategy Settings**. Push the tag of `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` to Docker Hub, the `$BUILD_NUMBER` of which is the operation serial number in the pipeline's activity list. - **Stage 5. Push the latest image**: Tag the sonarqube branch as `latest` and push it to Docker Hub. - **Stage 6. Deploy to dev**: Deploy the sonarqube branch to the development environment. Review is required for this stage. - **Stage 7. Push with tag**: Generate the tag and release it to GitHub. The tag is pushed to Docker Hub. @@ -47,7 +47,7 @@ There are eight stages as shown below in this example pipeline. ### Step 1: Create credentials -1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **Project Management**. For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/). +1. Log in to the KubeSphere console as `project-regular`. 
Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**. For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/). {{< notice note >}} @@ -61,9 +61,7 @@ There are eight stages as shown below in this example pipeline. | github-id | Account Credentials | GitHub | | demo-kubeconfig | kubeconfig | Kubernetes | -2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-a-sonarqube-token-for-a-new-project) to use the token for the **secret** field below. Click **OK** to finish. - - ![sonar-token](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg) +2. You need to create an additional credential (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-a-sonarqube-token-for-a-new-project) to enter your SonarQube token in the **Token** field for a credential of the **Access token** type. Click **OK** to finish. 3. You also need to create a GitHub personal access token with the permission as shown in the below image, and then use the generated token to create Account Credentials (for example, `github-token`) for GitHub authentication in your DevOps project. @@ -77,17 +75,11 @@ There are eight stages as shown below in this example pipeline. 4. In total, you have five credentials in the list. - ![credential-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.png) - ### Step 2: Modify the Jenkinsfile in your GitHub repository -1. Log in to GitHub. 
Fork [devops-java-sample](https://github.com/kubesphere/devops-java-sample) from the GitHub repository to your own GitHub account. +1. Log in to GitHub. Fork [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) from the GitHub repository to your own GitHub account. - ![fork-github-repo](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg) - -2. In your own GitHub repository of **devops-java-sample**, click the file `Jenkinsfile-online` in the root directory. - - ![jenkins-edit-1](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.png) +2. In your own GitHub repository of **devops-maven-sample**, click the file `Jenkinsfile-online` in the root directory. 3. Click the edit icon on the right to edit environment variables. @@ -95,14 +87,14 @@ There are eight stages as shown below in this example pipeline. | Items | Value | Description | | :--- | :--- | :--- | - | DOCKER\_CREDENTIAL\_ID | dockerhub-id | The **Credential ID** you set in KubeSphere for your Docker Hub account. | - | GITHUB\_CREDENTIAL\_ID | github-id | The **Credential ID** you set in KubeSphere for your GitHub account. It is used to push tags to your GitHub repository. | - | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | The **Credential ID** you set in KubeSphere for your kubeconfig. It is used to access a running Kubernetes cluster. | + | DOCKER\_CREDENTIAL\_ID | dockerhub-id | The **Name** you set in KubeSphere for your Docker Hub account. | + | GITHUB\_CREDENTIAL\_ID | github-id | The **Name** you set in KubeSphere for your GitHub account. It is used to push tags to your GitHub repository. | + | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | The **Name** you set in KubeSphere for your kubeconfig. It is used to access a running Kubernetes cluster. | | REGISTRY | docker.io | It defaults to `docker.io`, serving as the address of pushing images. 
| | DOCKERHUB\_NAMESPACE | your-dockerhub-account | Replace it with your Docker Hub's account name. It can be the Organization name under the account. | | GITHUB\_ACCOUNT | your-github-account | Replace it with your GitHub account name. For example, your GitHub account name is `kubesphere` if your GitHub address is  `https://github.com/kubesphere/`. It can also be the account's Organization name. | - | APP\_NAME | devops-java-sample | The application name. | - | SONAR\_CREDENTIAL\_ID | sonar-token | The **Credential ID** you set in KubeSphere for the SonarQube token. It is used for code quality test. | + | APP\_NAME | devops-maven-sample | The application name. | + | SONAR\_CREDENTIAL\_ID | sonar-token | The **Name** you set in KubeSphere for the SonarQube token. It is used for code quality test. | {{< notice note >}} @@ -112,15 +104,13 @@ There are eight stages as shown below in this example pipeline. 4. After you edit the environmental variables, click **Commit changes** at the bottom of the page, which updates the file in the SonarQube branch. - ![commit-changes](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg) - ### Step 3: Create projects You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere-sample-prod`, which represent the development environment and the production environment respectively. Related Deployments and Services of the app will be created automatically in these two projects once the pipeline runs successfully. {{< notice note >}} -The account `project-admin` needs to be created in advance since it is the reviewer of the CI/CD Pipeline. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) for more information. +The account `project-admin` needs to be created in advance since it is the reviewer of the CI/CD Pipeline. 
See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) for more information. {{}} @@ -131,111 +121,85 @@ The account `project-admin` needs to be created in advance since it is the revie | kubesphere-sample-dev | development environment | | kubesphere-sample-prod | production environment | -2. After those projects are created, they will be listed in the project list as below: - - ![project-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/project-list.png) +2. After those projects are created, they will be listed in the project list. ### Step 4: Create a pipeline -1. Log out of KubeSphere and log back in as `project-regular`. Go to the DevOps project `demo-devops` and click **Create** to build a new pipeline. +1. Log out of KubeSphere and log back in as `project-regular`. Go to the DevOps project `demo-devops` and click **Create**. - ![create-a-pipeline1](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-a-pipeline1.png) +2. Provide the basic information in the displayed dialog box. Name it `jenkinsfile-in-scm` and specify a code repository under **Code Repository**. -2. Provide the basic information in the dialog that appears. Name it `jenkinsfile-in-scm` and select a code repository. +3. In the **GitHub** tab, select **github-token** from the drop-down list under **Credential**, and then click **OK** to select your repository. - ![create-pipeline-2](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.png) +4. Choose your GitHub account. All the repositories related to this token will be listed on the right. Select **devops-maven-sample** and click **Select**. Click **Next** to continue. -3. In the **GitHub** tab, select **github-token** from the drop-down list, and then click **Confirm** to select your repository. +5. 
In **Advanced Settings**, select the checkbox next to **Delete outdated branches**. In this tutorial, you can use the default value of **Branch Retention Period (days)** and **Maximum Branches**. - ![select-token](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-token.png) + Delete outdated branches means that you will discard the branch record all together. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: -4. Choose your GitHub account. All the repositories related to this token will be listed on the right. Select **devops-java-sample** and click **Select This Repository**. Click **Next** to continue. + - Branch Retention Period (days). Branches that exceed the retention period are deleted. - ![select-repo](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-repo.png) - -5. In **Advanced Settings**, check the box next to **Discard old branch**. In this tutorial, you can use the default value of **Days to keep old branches** and **Maximum number branches to keep**. - - ![branch-settings-1](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings-1.png) - - Discarding old branches means that you will discard the branch record all together. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: - - - Days to keep old branches. Branches will be discarded after a certain number of days. - - - Maximum number of branches to keep. The oldest branches will be discarded after branches reach a certain amount. + - Maximum Branches. 
The earliest branch is deleted when the number of branches exceeds the maximum number. {{< notice note >}} - **Days to keep old branches** and **Maximum number of branches to keep** apply to branches at the same time. As long as a branch meets the condition of either field, it will be discarded. For example, if you specify 2 as the number of retention days and 3 as the maximum number of branches, any branches that exceed either number will be discarded. KubeSphere repopulates these two fields with -1 by default, which means deleted branches will be discarded. + **Branch Retention Period (days)** and **Maximum Branches** apply to branches at the same time. As long as a branch meets the condition of either field, it is deleted. For example, if you specify 2 as the retention period and 3 as the maximum number of branches, any branch that exceeds either number is deleted. KubeSphere prepopulates these two fields with 7 and 5 by default respectively. {{}} -6. In **Behavioral strategy**, KubeSphere offers four strategies by default. You can delete **Discover PR from Forks** as this strategy will not be used in this example. You do not need to change the setting and can use the default value directly. - - ![remove-behavioral-strategy](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.png) +6. In **Strategy Settings**, KubeSphere offers four strategies by default. You can delete **Discover PRs from Forks** as this strategy will not be used in this example. You do not need to change the setting and can use the default value directly. As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch. **Discover Branches** - - **Exclude branches that are also filed as PRs**. The source branch is not scanned such as the origin's master branch. These branches need to be merged. - - **Only branches that are also filed as PRs**. Only scan the PR branch. 
- - **All branches**. Pull all the branches from the repository origin. + - **Exclude branches filed as PRs**. The source branch is not scanned such as the origin's master branch. These branches need to be merged. + - **Include only branches filed as PRs**. Only scan the PR branch. + - **Include all branches**. Pull all the branches from the repository origin. - **Discover PR from Origin** + **Discover PRs from Origin** - - **Source code version of PR merged with target branch**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. - - **Source code version of PR itself**. A pipeline is created and runs based on the source code of the PR itself. - - **Two pipelines are created when a PR is discovered**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. + - **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. + - **Pull the code at the point of the PR**. A pipeline is created and runs based on the source code of the PR itself. + - **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. {{< notice note >}} - You have to choose GitHub as your code repository to enable the settings of **Behavioral strategy** here. + You have to choose GitHub as your code repository to enable the settings of **Strategy Settings** here. {{}} -7. Scroll down to **Script Path**. The field specifies the Jenkinsfile path in the code repository. It indicates the repository's root directory. If the file location changes, the script path also needs to be changed. Please change it to `Jenkinsfile-online`, which is the file name of Jenkinsfile in the example repository located in the root directory. 
+7. Scroll down to **Script Path**. The field specifies the Jenkinsfile path in the code repository. It indicates the repository's root directory. If the file location changes, the script path also needs to be changed. Change it to `Jenkinsfile-online`, which is the file name of Jenkinsfile in the example repository located in the root directory. - ![jenkinsfile-online](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg) - -8. In **Scan Repo Trigger**, check **If not, scan regularly** and set the interval to **5 minutes**. Click **Create** to finish. - - ![advanced-setting1](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting1.png) +8. In **Scan Trigger**, select **Scan periodically** and set the interval to **5 minutes**. Click **Create** to finish. {{< notice note >}} - You can set a specific interval to allow pipelines to scan remote repositories, so that any code updates or new PRs can be detected based on the strategy you set in **Behavioral strategy**. + You can set a specific interval to allow pipelines to scan remote repositories, so that any code updates or new PRs can be detected based on the strategy you set in **Strategy Settings**. {{}} ### Step 5: Run a pipeline -1. After a pipeline is created, it displays in the list below. Click it to go to its detail page. - - ![pipeline-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.png) +1. After a pipeline is created, click its name to go to its details page. {{< notice note >}} - - You can click the three dots on the right side of the pipeline and then select **Copy Pipeline** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. - - The pipeline detail page shows **Sync Status**. 
It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Success** icon if the synchronization is successful. + - You can click on the right side of the pipeline and then select **Copy** to create a copy of it. If you need to concurrently run multiple pipelines that don't contain multiple branches, you can select all of these pipelines and then click **Run** to run them in a batch. + - The pipeline details page shows **Sync Status**. It reflects the synchronization result between KubeSphere and Jenkins, and you can see the **Successful** icon if the synchronization is successful. {{}} -2. Under **Activity**, three branches are being scanned. Click **Run** on the right and the pipeline runs based on the behavioral strategy you set. Select **sonarqube** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new activity. - - ![pipeline-detail](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.png) - - ![tag-name](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg) +2. Under **Run Records**, three branches are being scanned. Click **Run** on the right and the pipeline runs based on the behavioral strategy you set. Select **sonarqube** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new activity. {{< notice note >}} - - If you do not see any activity on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button). + - If you do not see any run records on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button). - The tag name is used to generate releases and images with the tag in GitHub and Docker Hub. An existing tag name cannot be used again for the field `TAG_NAME`. Otherwise, the pipeline will not be running successfully. 
{{}} -3. Wait for a while and you can see some activities stop and some fail. Click the first one to view details. - - ![activity-failure](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/activity-faliure.png) +3. Wait for a while, and you can see some activities stop and some fail. Click the first one to view details. {{< notice note >}} @@ -245,8 +209,6 @@ The account `project-admin` needs to be created in advance since it is the revie 4. The pipeline pauses at the stage `deploy to dev`. You need to click **Proceed** manually. Note that the pipeline will be reviewed three times as `deploy to dev`, `push with tag`, and `deploy to production` are defined in the Jenkinsfile respectively. - ![pipeline-proceed](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.png) - In a development or production environment, it requires someone who has higher authority (for example, release manager) to review the pipeline, images, as well as the code analysis result. They have the authority to determine whether the pipeline can go to the next stage. In the Jenkinsfile, you use the section `input` to specify who reviews the pipeline. If you want to specify a user (for example, `project-admin`) to review it, you can add a field in the Jenkinsfile. If there are multiple users, you need to use commas to separate them as follows: ```groovy @@ -257,36 +219,24 @@ The account `project-admin` needs to be created in advance since it is the revie {{< notice note >}} - In KubeSphere v3.1, the account that can run a pipeline will be able to continue or terminate the pipeline if there is no reviewer specified. Pipeline creators, accounts with the role of `admin` in the project, or the account you specify will be able to continue or terminate the pipeline. + In KubeSphere 3.2.x, the account that can run a pipeline will be able to continue or terminate the pipeline if there is no reviewer specified. 
Pipeline creators, accounts with the role of `admin` in the project, or the account you specify will be able to continue or terminate the pipeline. {{}} ### Step 6: Check pipeline status -1. In **Task Status**, you can see how a pipeline is running. Please note that the pipeline will keep initializing for several minutes after it is just created. There are eight stages in the sample pipeline and they have been defined separately in [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online). +1. In **Task Status**, you can see how a pipeline is running. Please note that the pipeline will keep initializing for several minutes after it is just created. There are eight stages in the sample pipeline and they have been defined separately in [Jenkinsfile-online](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Jenkinsfile-online). - ![inspect-pipeline-log-1](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.png) - -2. Check the pipeline running logs by clicking **Show Logs** in the top-right corner. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis. - - ![inspect-pipeline-log-2](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg) +2. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis. ### Step 7: Verify results -1. Once you successfully executed the pipeline, click **Code Quality** to check the results through SonarQube as follows. 
- - ![code-quality](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/code-quality.png) - - ![sonarqube-result-detail](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.png) +1. Once you have successfully executed the pipeline, click **Code Check** to check the results through SonarQube as follows. 2. The Docker image built through the pipeline has also been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs. - ![docker-hub-result](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.png) - 3. At the same time, a new tag and a new release have been generated in GitHub. - ![github-result](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/github-result.png) - 4. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. Go to these two projects and here are the expected result: | Environment | URL | Namespace | Deployment | Service | @@ -294,14 +244,6 @@ The account `project-admin` needs to be created in advance since it is the revie | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | - #### Deployments - - ![pipeline-deployments](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.png) - - #### Services - - ![devops-prod](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.png) - {{< notice note >}} You may need to open the port in your security groups so that you can access the app with the URL. 
@@ -310,13 +252,9 @@ The account `project-admin` needs to be created in advance since it is the revie ### Step 8: Access the example Service -1. To access the Service, log in to KubeSphere as `admin` to use the **web kubectl** from **Toolbox**. Go to the project `kubesphere-sample-dev`, and select `ks-sample-dev` in **Services** under **Application Workloads**. The endpoint can be used to access the Service. +1. To access the Service, log in to KubeSphere as `admin` to use the **kubectl** from **Toolbox**. Go to the project `kubesphere-sample-dev`, and click `ks-sample-dev` in **Services** under **Application Workloads**. Obtain the endpoint displayed on the details page to access the Service. - ![click-service](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/click-service.png) - - ![access-endpoint](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.png) - -2. Use the **web kubectl** from **Toolbox** in the bottom-right corner by executing the following command: +2. Use the **kubectl** from **Toolbox** in the lower-right corner by executing the following command: ```bash curl 10.233.120.230:8080 diff --git a/content/en/docs/devops-user-guide/how-to-use/credential-management.md b/content/en/docs/devops-user-guide/how-to-use/credential-management.md index 17524605c..06c8a3cae 100644 --- a/content/en/docs/devops-user-guide/how-to-use/credential-management.md +++ b/content/en/docs/devops-user-guide/how-to-use/credential-management.md @@ -10,13 +10,11 @@ Credentials are objects containing sensitive information, such as usernames and A DevOps project user with necessary permissions can configure credentials for Jenkins pipelines. Once the user adds or configures these credentials in a DevOps project, they can be used in the DevOps project to interact with third-party applications. 
-Currently, you can store the following 4 types of credentials in a DevOps project: +Currently, you can create the following 4 types of credentials in a DevOps project: -![create-credential-page](/images/docs/devops-user-guide/using-devops/credential-management/create-credential-page.png) - -- **Account Credentials**: Username and password which can be handled as separate components or as a colon-separated string in the format `username:password`, such as accounts of GitHub, GitLab, and Docker Hub. -- **SSH**: Username with a private key, an SSH public/private key pair. -- **Secret Text**: Secret content in a file. +- **Username and password**: Username and password which can be handled as separate components or as a colon-separated string in the format `username:password`, such as accounts of GitHub, GitLab, and Docker Hub. +- **SSH key**: Username with a private key, an SSH public/private key pair. +- **Access token**: a token with certain access. - **kubeconfig**: It is used to configure cross-cluster authentication. If you select this type, the dialog will auto-populate the field with the kubeconfig file of the current Kubernetes cluster. This tutorial demonstrates how to create and manage credentials in a DevOps project. For more information about how credentials are used, see [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/) and [Create a Pipeline Using Graphical Editing Panels](../create-a-pipeline-using-graphical-editing-panel/). @@ -24,31 +22,27 @@ This tutorial demonstrates how to create and manage credentials in a DevOps proj ## Prerequisites - You have enabled [KubeSphere DevOps System](../../../pluggable-components/devops/). -- You have a workspace, a DevOps project and an account (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). 
+- You have a workspace, a DevOps project and a user (`project-regular`) invited to the DevOps project with the `operator` role. If they are not ready yet, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create Credentials -Log in to the console of KubeSphere as `project-regular`. Navigate to your DevOps project, choose **Credentials** and click **Create**. - -![create-credential-step1](/images/docs/devops-user-guide/using-devops/credential-management/create-credential-step1.png) +Log in to the console of KubeSphere as `project-regular`. Navigate to your DevOps project, select **Credentials** and click **Create**. ### Create Docker Hub credentials -1. In the dialog that appears, provide the following information. +1. In the displayed dialog box, provide the following information. - ![dockerhub-credentials](/images/docs/devops-user-guide/using-devops/credential-management/dockerhub-credentials.png) - - - **Credential ID**: Set an ID, such as `dockerhub-id`, which can be used in pipelines. - - **Type**: Select **Account Credentials**. - - **Username**: Your Docker Hub account (i.e Docker ID). - - **Token/Password**: Your Docker Hub password. + - **Name**: Set a name, such as `dockerhub-id`, which can be used in pipelines. + - **Type**: Select **Username and password**. + - **Username**: Your Docker Hub account (for example, Docker ID). + - **Password/Token**: Your Docker Hub password. - **Description**: A brief introduction to the credentials. 2. Click **OK** when you finish. ### Create GitHub credentials -Similarly, follow the same steps above to create GitHub credentials. Set a different Credential ID (for example, `github-id`) and also select **Account Credentials** for **Type**. Enter your GitHub username and password for **Username** and **Token/Password** respectively. +Similarly, follow the same steps above to create GitHub credentials. 
Set a different credential name (for example, `github-id`) and also select **Username and password** for **Type**. Enter your GitHub username and password for **Username** and **Password/Token** respectively. {{< notice note >}} @@ -58,7 +52,7 @@ If there are any special characters such as `@` and `$` in your account or passw ### Create kubeconfig credentials -Similarly, follow the same steps above to create kubeconfig credentials. Set a different Credential ID (for example, `demo-kubeconfig`) and select **kubeconfig**. +Similarly, follow the same steps above to create kubeconfig credentials. Set a different credential name (for example, `demo-kubeconfig`) and select **kubeconfig**. {{< notice info >}} @@ -68,18 +62,12 @@ A file that is used to configure access to clusters is called a kubeconfig file. ## View and Manage Credentials -1. Credentials created appear in the list as below. +1. Credentials created are displayed in the list. - ![credentials-list](/images/docs/devops-user-guide/using-devops/credential-management/credentials-list.png) - -2. Click any of them to go to its detail page, where you can see account details and all the events related to the credentials. - - ![credential-detail-page](/images/docs/devops-user-guide/using-devops/credential-management/credential-detail-page.png) +2. Click any of them to go to its details page, where you can see account details and all the events related to the credentials. 3. You can also edit or delete credentials on this page. Note that when you edit credentials, KubeSphere does not display the existing username or password information. The previous one will be overwritten if you enter a new username and password. 
- ![edit-credentials](/images/docs/devops-user-guide/using-devops/credential-management/edit-credentials.png) - ## See Also [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/) diff --git a/content/en/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md b/content/en/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md index 1c2e733a5..11160cd0c 100644 --- a/content/en/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md +++ b/content/en/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md @@ -8,19 +8,19 @@ weight: 11291 [GitLab](https://about.gitlab.com/) is an open source code repository platform that provides public and private repositories. It is a complete DevOps platform that enables professionals to perform their tasks in a project. -In KubeSphere v3.1, you can create a multi-branch pipeline with GitLab in your DevOps project. This tutorial demonstrates how to create a multi-branch pipeline with GitLab. +In KubeSphere 3.1.x and later, you can create a multi-branch pipeline with GitLab in your DevOps project. This tutorial demonstrates how to create a multi-branch pipeline with GitLab. ## Prerequisites - You need to have a [GitLab](https://gitlab.com/users/sign_in) account and a [Docker Hub](https://hub.docker.com/) account. - You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/). -- You need to create a workspace, a DevOps project and an account (`project-regular`). This account must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. 
For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Create credentials -1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **Project Management**. For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/). +1. Log in to the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **DevOps Project Settings**. For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/). {{< notice note >}} @@ -36,29 +36,19 @@ In KubeSphere v3.1, you can create a multi-branch pipeline with GitLab in your D 2. After creation, you can see the credentials in the list. - ![credential-created](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/credential-created.png) - ### Step 2: Modify the Jenkinsfile in your GitLab repository -1. Log in to GitLab and create a public project. Click **Import project/repository**, select **Repo by URL** to enter the URL of [devops-java-sample](https://github.com/kubesphere/devops-java-sample), select **Public** for **Visibility Level**, and then click **Create project**. - - ![click-import-project](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-import-project.png) - - ![use-git-url](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/use-git-url.png) +1. Log in to GitLab and create a public project. Click **Import project/repository**, select **Repo by URL** to enter the URL of [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample), select **Public** for **Visibility Level**, and then click **Create project**. 2. 
In the project just created, create a new branch from the master branch and name it `gitlab-demo`. - ![new-branch](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/new-branch.png) - 3. In the `gitlab-demo` branch, click the file `Jenkinsfile-online` in the root directory. - ![click-jenkinsfile](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-jenkinsfile.png) - 4. Click **Edit**, change `GITHUB_CREDENTIAL_ID`, `GITHUB_ACCOUNT`, and `@github.com` to `GITLAB_CREDENTIAL_ID`, `GITLAB_ACCOUNT`, and `@gitlab.com` respectively, and then edit the following items. You also need to change the value of `branch` in the `push latest` and `deploy to dev` stages to `gitlab-demo`. | Item | Value | Description | | -------------------- | --------- | ------------------------------------------------------------ | - | GITLAB_CREDENTIAL_ID | gitlab-id | The **Credential ID** you set in KubeSphere for your GitLab account. It is used to push tags to your GitLab repository. | + | GITLAB_CREDENTIAL_ID | gitlab-id | The **Name** you set in KubeSphere for your GitLab account. It is used to push tags to your GitLab repository. | | DOCKERHUB_NAMESPACE | felixnoo | Replace it with your Docker Hub’s account name. It can be the Organization name under the account. | | GITLAB_ACCOUNT | felixnoo | Replace it with your GitLab account name. It can also be the account’s Group name. | @@ -70,8 +60,6 @@ In KubeSphere v3.1, you can create a multi-branch pipeline with GitLab in your D 5. Click **Commit changes** to update this file. - ![commit-changes](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/commit-changes.png) - ### Step 3: Create projects You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere-sample-prod`, which represent the development environment and the production environment respectively. 
For more information, refer to [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/#step-3-create-projects). @@ -80,23 +68,21 @@ You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere 1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click **Create** to create a new pipeline. -2. Provide the basic information in the dialog that appears. Name it `gitlab-multi-branch` and select a code repository. +2. Provide the basic information in the displayed dialog box. Name it `gitlab-multi-branch` and select a code repository. - ![create-pipeline](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/create-pipeline.png) - -3. In the **GitLab** tab, select the default option `https://gitlab.com` for GitLab Server, enter the username of the GitLab project owner for **Owner**, and then select the `devops-java-sample` repository from the drop-down list for **Repository Name**. Click the tick icon in the bottom-right corner and then click **Next**. - - ![select-gitlab](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-gitlab.png) +3. On the **GitLab** tab, select the default option `https://gitlab.com` for **GitLab Server Address**, enter the username of the GitLab project owner for **Project Group/Owner**, and then select the `devops-maven-sample` repository from the drop-down list for **Code Repository**. Click **√** in the lower-right corner and then click **Next**. {{< notice note >}} - If you want to use a private repository from GitLab, you need to create an access token with API and read_repository permissions on GitLab, create a credential for accessing GitLab on the Jenkins dashboard, and then add the credential in **GitLab Server** under **Configure System**. For more information about how to log in to Jenkins, refer to [Jenkins System Settings](../jenkins-setting/#log-in-to-jenkins-to-reload-configurations). 
+ If you want to use a private repository from GitLab, refer to the following steps: + + - Go to **User Settings > Access Tokens** on GitLab to create an access token with API and read_repository permissions. + - [Log in to the Jenkins dashboard](../../how-to-integrate/sonarqube/#step-5-add-the-sonarqube-server-to-jenkins), go to **Manage Jenkins > Manage Credentials** to use your GitLab token to create a Jenkins credential for accessing GitLab, and go to **Manage Jenkins > Configure System** to add the credential in **GitLab Server**. + - In your DevOps project, select **DevOps Project Settings > Credentials** to use your GitLab token to create a credential. You have to specify the credential for **Credential** on the **GitLab** tab when creating a pipeline so that the pipeline can pull code from your private GitLab repository. {{}} -4. In the **Advanced Settings** tab, scroll down to **Script Path**. Change it to `Jenkinsfile-online` and then click **Create**. - - ![jenkinsfile-online](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png) +4. On the **Advanced Settings** tab, scroll down to **Script Path**. Change it to `Jenkinsfile-online` and then click **Create**. {{< notice note >}} @@ -106,13 +92,9 @@ You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere ### Step 5: Run a pipeline -1. After a pipeline is created, it displays in the list. Click it to go to its detail page. +1. After a pipeline is created, it is displayed in the list. Click its name to go to its details page. -2. Click **Run** on the right. In the dialog that appears, select **gitlab-demo** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new activity. - - ![click-run](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-run.png) - - ![select-branch](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-branch.png) +2. 
Click **Run** on the right. In the displayed dialog box, select **gitlab-demo** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new run. {{< notice note >}} @@ -122,23 +104,15 @@ You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere ### Step 6: Check the pipeline status -1. In the **Task Status** tab, you can see how a pipeline is running. Check the pipeline running logs by clicking **Show Logs** in the top-right corner. - - ![check-log](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/check-log.png) +1. In the **Task Status** tab, you can see how a pipeline is running. Check the pipeline running logs by clicking **View Logs** in the upper-right corner. 2. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you can click it to inspect logs, which can also be downloaded to your local machine for further analysis. - ![pipeline-logs](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/pipeline-logs.png) - ### Step 7: Verify results 1. The Docker image built through the pipeline has been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs. - ![docker-image](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/docker-image.png) - -2. At the same time, a new tag has been generated in GitLab. - - ![gitlab-result](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/gitlab-result.png) +2. At the same time, a new tag is generated in GitLab. 3. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. 
@@ -147,10 +121,6 @@ You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | - ![deployment](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/deployment.png) - - ![service](/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/service.png) - {{< notice note >}} You may need to open the port in your security groups so that you can access the app with the URL. For more information, refer to [Access the example Service](../create-a-pipeline-using-jenkinsfile/#step-8-access-the-example-service). diff --git a/content/en/docs/devops-user-guide/how-to-use/jenkins-email.md b/content/en/docs/devops-user-guide/how-to-use/jenkins-email.md index a57422afb..b696c0e35 100644 --- a/content/en/docs/devops-user-guide/how-to-use/jenkins-email.md +++ b/content/en/docs/devops-user-guide/how-to-use/jenkins-email.md @@ -12,27 +12,25 @@ The built-in Jenkins cannot share the same email configuration with the platform ## Prerequisites - You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/). -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. ## Set the Email Server -1. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. 
If you have enabled the [multi-cluster feature](../../../multicluster-management/) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. -3. Go to **Workloads** under **Application Workloads**, and choose the project **kubesphere-devops-system** from the drop-down list. Click on the right of **ks-jenkins** to edit its YAML. +3. Go to **Workloads** under **Application Workloads**, and select the project **kubesphere-devops-system** from the drop-down list. Click on the right of `devops-jenkins` and select **Edit YAML** to edit its YAML. - ![workloads-list](/images/docs/devops-user-guide/using-devops/jenkins-email/workloads-list.png) - -4. Scroll down to the fields in the image below which you need to specify. Click **Update** when you finish to save changes. +4. Scroll down to the fields in the image below which you need to specify. Click **OK** when you finish to save changes. {{< notice warning >}} - Once you modify the Email server in the `ks-jenkins` Deployment, it will restart itself. Consequently, the DevOps system will be unavailable for a few minutes. Please make such modification at an appropriate time. + Once you modify the Email server in the `devops-jenkins` Deployment, it will restart itself. Consequently, the DevOps system will be unavailable for a few minutes. Please make such modification at an appropriate time. 
{{}} - ![set-jenkins-email-3](/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email-3.jpg) + ![set-jenkins-email](/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email.png) | Environment Variable Name | Description | | ------------------------- | -------------------------------- | diff --git a/content/en/docs/devops-user-guide/how-to-use/jenkins-setting.md b/content/en/docs/devops-user-guide/how-to-use/jenkins-setting.md index ac094c769..bddfc1e5d 100644 --- a/content/en/docs/devops-user-guide/how-to-use/jenkins-setting.md +++ b/content/en/docs/devops-user-guide/how-to-use/jenkins-setting.md @@ -28,61 +28,17 @@ Besides, you can find the `formula.yaml` file in the repository [ks-jenkins](htt It is recommended that you configure Jenkins in KubeSphere through Configuration as Code (CasC). The built-in Jenkins CasC file is stored as a [ConfigMap](../../../project-user-guide/configuration/configmaps/). -1. Log in to KubeSphere as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to KubeSphere as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. If you have enabled the [multi-cluster feature](../../../multicluster-management/) with member clusters imported, you can select a specific cluster to edit the ConfigMap. If you have not enabled the feature, refer to the next step directly. -3. From the navigation bar, select **ConfigMaps** under **Configurations**. On the **ConfigMaps** page, select `kubesphere-devops-system` from the drop-down list and click `jenkins-casc-config`. +3. On the left navigation pane, select **ConfigMaps** under **Configuration**. On the **ConfigMaps** page, select `kubesphere-devops-system` from the drop-down list and click `jenkins-casc-config`. - ![edit-configmap](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-configmap.png) +4. 
On the details page, click **Edit YAML** from the **More** drop-down list. -4. On the detail page, click **Edit YAML** from the **More** drop-down list. +5. The configuration template for `jenkins-casc-config` is a YAML file under the `data.jenkins_user.yaml:` section. You can modify the container image, label, resource requests and limits, etc. in the broker (Kubernetes Jenkins agent) in the ConfigMap or add a container in the podTemplate. When you finish, click **OK**. - ![more-list](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/more-list.png) - -5. The configuration template for `jenkins-casc-config` is a YAML file as shown below. You can modify the container image, label, resource requests and limits, etc. in the broker (Kubernetes Jenkins agent) in the ConfigMap or add a container in the podTemplate. When you finish, click **Update**. - - ![edit-jenkins](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-jenkins.png) - -## Log in to Jenkins to Reload Configurations - -After you modified `jenkins-casc-config`, you need to reload your updated system configuration on the **Configuration as Code** page on the Jenkins dashboard. This is because system settings configured directly through the Jenkins dashboard may be overwritten by the CasC configuration after Jenkins is rescheduled. - -1. Execute the following command to get the address of Jenkins. - - ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) - export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - ``` - -2. You can see the expected output as below, which tells you the IP address and port number of Jenkins. - - ```bash - http://192.168.0.4:30180 - ``` - -3. Access Jenkins at `http://Node IP:Port Number`. 
When KubeSphere is installed, the Jenkins dashboard is also installed by default. Besides, Jenkins is configured with KubeSphere LDAP, which means you can log in to Jenkins with KubeSphere accounts (for example, `admin/P@88w0rd`) directly. - - ![jenkins-dashboard](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/jenkins-dashboard.jpg) - - {{< notice note >}} - - You may need to set up necessary port forwarding rules and open port `30180` to access Jenkins in your security groups depending on where your instances are deployed. - - {{}} - -4. After you log in to the dashboard, click **Manage Jenkins** from the navigation bar. - - ![manage-jenkins](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/manage-jenkins.png) - -5. Scroll down and click **Configuration as Code**. - - ![configuration-as-code](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/configuration-as-code.png) - -6. To reload configurations that you have modified in the ConfigMap, click **Apply new configuration**. - - ![apply-config](/images/docs/devops-user-guide/using-devops/jenkins-system-settings/apply-config.png) +6. Wait for at least 70 seconds until your changes are automatically reloaded. 7. For more information about how to set up Jenkins via CasC, see the [Jenkins documentation](https://github.com/jenkinsci/configuration-as-code-plugin). @@ -90,4 +46,5 @@ After you modified `jenkins-casc-config`, you need to reload your updated system In the current version, not all plugins support CasC settings. CasC will only overwrite plugin configurations that are set up through CasC. 
- {{}} \ No newline at end of file + {{}} + diff --git a/content/en/docs/devops-user-guide/how-to-use/jenkins-shared-library.md b/content/en/docs/devops-user-guide/how-to-use/jenkins-shared-library.md index 59fef9c80..69b39491a 100644 --- a/content/en/docs/devops-user-guide/how-to-use/jenkins-shared-library.md +++ b/content/en/docs/devops-user-guide/how-to-use/jenkins-shared-library.md @@ -13,32 +13,26 @@ This tutorial demonstrates how to use Jenkins shared libraries in KubeSphere Dev ## Prerequisites - You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/). -- You need to create a workspace, a DevOps project and an account (`project-regular`). This account must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to have a Jenkins shared library available. This tutorial uses the Jenkins shared library in [a GitHub repository](https://github.com/devops-ws/jenkins-shared-library) as an example. ## Configure a Shared Library on the Jenkins Dashboard -1. [Log in to the Jenkins dashboard](../jenkins-setting/#log-in-to-jenkins-to-reload-configurations) and click **Manage Jenkins** in the left navigation bar. +1. [Log in to the Jenkins dashboard](../../how-to-integrate/sonarqube/#step-5-add-the-sonarqube-server-to-jenkins) and click **Manage Jenkins** in the left navigation pane. -2. Scroll down and click **Configure System**. - - ![click_configure](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-configure.png) +2. Scroll down and click **Configure System**. 3. 
Scroll down to **Global Pipeline Libraries** and click **Add**. - ![click-add](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-add.png) - 4. Configure the fields as below. - **Name**. Set a name (for example, `demo-shared-library`) for the shared library so that you can import the shared library by referring to this name in a Jenkinsfile. - **Default version**. Set a branch name from the repository where you put your shared library as the default branch for importing your shared library. Enter `master` for this tutorial. - - Under **Retrieval method**, choose **Modern SCM**. + - Under **Retrieval method**, select **Modern SCM**. - - Under **Source Code Management**, choose **Git** and enter the URL of the example repository for **Project Repository**. You have to configure **Credentials** if you use your own repository that requires the credentials for accessing it. - - ![configure-shared-library](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/configure-shared-library.png) + - Under **Source Code Management**, select **Git** and enter the URL of the example repository for **Project Repository**. You have to configure **Credentials** if you use your own repository that requires the credentials for accessing it. 5. When you finish editing, click **Apply**. @@ -54,19 +48,13 @@ This tutorial demonstrates how to use Jenkins shared libraries in KubeSphere Dev 1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click **Create** on the **Pipelines** page. -2. Set a name (for example, `demo-shared-library`) in the pop-up window and click **Next**. +2. Set a name (for example, `demo-shared-library`) in the displayed dialog box and click **Next**. - ![set-name](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/set-name.png) - -3. In **Advanced Settings**, click **Create** directly to create a pipeline with the default settings. 
- - ![click-create](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-create.png) +3. On the **Advanced Settings** tab, click **Create** to create a pipeline with the default settings. ### Step 2: Edit the pipeline -1. In the pipeline list, click the pipeline to go to its detail page and click **Edit Jenkinsfile**. - - ![edit-jenkinsfile](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/edit-jenkinsfile.png) +1. In the pipeline list, click the pipeline to go to its details page and click **Edit Jenkinsfile**. 2. In the displayed dialog box, enter the following example Jenkinsfile. When you finish editing, click **OK**. @@ -126,15 +114,9 @@ This tutorial demonstrates how to use Jenkins shared libraries in KubeSphere Dev ### Step 3: Run the pipeline -1. You can view the stage under the **Pipeline** tab. Click **Run** to run it. +1. You can view the stage under the **Task Status** tab. Click **Run** to run it. - ![click-run](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-run.png) +2. After a while, the pipeline ran successfully. -2. After a while, the pipeline will run successfully. - - ![run-successfully](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/run-successfully.png) - -3. You can click the **Success** record under **Status**, and then click **Show Logs** to view the log details. - - ![log-details](/images/docs/devops-user-guide/using-devops/jenkins-shared-library/log-details.png) +3. You can click the **Successful** record under **Run Records**, and then click **View Logs** to view the log details. 
diff --git a/content/en/docs/devops-user-guide/how-to-use/pipeline-settings.md b/content/en/docs/devops-user-guide/how-to-use/pipeline-settings.md index 6586597fb..a193a8a19 100644 --- a/content/en/docs/devops-user-guide/how-to-use/pipeline-settings.md +++ b/content/en/docs/devops-user-guide/how-to-use/pipeline-settings.md @@ -10,169 +10,141 @@ When you create a pipeline, you can customize its configurations through various ## Prerequisites -- You need to create a workspace, a DevOps project and an account (`project-regular`). This account must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a DevOps project and a user (`project-regular`). This user must be invited to the DevOps project with the `operator` role. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/). -## Pipeline Settings - -### Basic Information - -![basic-info-tab](/images/docs/devops-user-guide/using-devops/pipeline-settings/basic-info-tab.png) +## Basic Information On the **Basic Information** tab, you can customize the following information: - **Name**. The name of the pipeline. Pipelines in the same DevOps project must have different names. -- **Project**. Projects will be grouped by their resources, which you can view and manage by project. +- **DevOps Project**. The DevOps project to which the pipeline belongs. - **Description**. The additional information to describe the pipeline. Description is limited to 256 characters. -- **Code Repository (optional)**. You can select a code repository as the code source for the pipeline. In KubeSphere v3.1, you can select GitHub, GitLab, Bitbucket, Git, and SVN as the code source. 
+- **Code Repository (optional)**. You can select a code repository as the code source for the pipeline. You can select GitHub, GitLab, Bitbucket, Git, and SVN as the code source. {{< tabs >}} {{< tab "GitHub" >}} - ![code-source-github](/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-github.png) - - If you select **GitHub**, you have to specify the token for accessing GitHub. If you have created a credential with your GitHub token in advance, you can select it from the drop-down list, or you can click **Create a credential** to create a new one. Click **Confirm** after selecting the token and you can view your repository on the right. Click the **√** icon after you finish all operations. + If you select **GitHub**, you have to specify the credential for accessing GitHub. If you have created a credential with your GitHub token in advance, you can select it from the drop-down list, or you can click **Create Credential** to create a new one. Click **OK** after selecting the credential and you can view your repository on the right. Click the **√** icon after you finish all operations. {{}} {{< tab "GitLab" >}} - ![code-source-gitlab](/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-gitlab.png) - - If you select **GitLab**, you have to specify the GitLab server, owner and repository name. You also need to specify a credential if it is needed for obtaining repository codes. Click the **√** icon after you finish all operations. + If you select **GitLab**, you have to specify the GitLab server address, project group/owner, and code repository. You also need to specify a credential if it is needed for accessing the code repository. Click the **√** icon after you finish all operations. {{}} {{< tab "Bitbucket" >}} - ![code-source-bitbucket](/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-bitbucket.png) - - If you select **Bitbucket**, you have to enter your Bitbucket server. 
You can create a credential with your Bitbucket username and password in advance and then select the credential from the drop-down list, or you can click **Create a credential** to create a new one. Click **Confirm** after entering the information and you can view your repository on the right. Click the **√** icon after you finish all operations. + If you select **Bitbucket**, you have to enter your Bitbucket server address. You can create a credential with your Bitbucket username and password in advance and then select the credential from the drop-down list, or you can click **Create Credential** to create a new one. Click **OK** after entering the information, and you can view your repository on the right. Click the **√** icon after you finish all operations. {{}} {{< tab "Git" >}} - ![code-source-git](/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-git.png) - - If you select **Git**, you have to specify the repository URL. You need to specify a credential if it is needed for obtaining repository codes. You can also click **Create a credential** to create a new credential. Click the **√** icon after you finish all operations. + If you select **Git**, you have to specify the repository URL. You need to specify a credential if it is needed for accessing the code repository. You can also click **Create Credential** to create a new credential. Click the **√** icon after you finish all operations. {{}} {{< tab "SVN" >}} - ![code-source-svn](/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-svn.png) - If you select **SVN**, you have to specify the repository URL and the credential. You can also specify the branch included and excluded based on your needs. Click the **√** icon after you finish all operations. 
{{}} {{}} -### Advanced Settings with A Code Repository Selected +## Advanced Settings with Code Repository Specified -If you selected a code repository, you can customize the following configurations on the **Advanced Settings** tab: +If you specify a code repository, you can customize the following configurations on the **Advanced Settings** tab: -**Branch Settings** +### Branch Settings -![branch-settings](/images/docs/devops-user-guide/using-devops/pipeline-settings/branch-settings.png) +**Delete outdated branches**. Delete outdated branches automatically. The branch record is deleted altogether. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: -**Discard old branch** means that the branch record will be discarded all together. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded: +- **Branch Retention Period (days)**. Branches that exceed the retention period are deleted. -- **Days to keep old branches**. Branches will be discarded after a certain number of days. - -- **Maximum number of branches to keep**. The oldest branches will be discarded after branches reach a certain amount. +- **Maximum Branches**. When the number of branches exceeds the maximum number, the earliest branch is deleted. {{< notice note >}} - **Days to keep old branches** and **Maximum number of branches to keep** apply to branches at the same time. As long as a branch meets the condition of either field, it will be discarded. 
For example, if you specify 2 as the number of retention days and 3 as the maximum number of branches, any branches that exceed either number will be discarded. KubeSphere prepopulates these two fields with -1 by default, which means deleted branches will be discarded. + **Branch Retention Period (days)** and **Maximum Branches** apply to branches at the same time. As long as a branch meets the condition of either field, it will be discarded. For example, if you specify 2 as the number of retention days and 3 as the maximum number of branches, any branches that exceed either number will be discarded. KubeSphere prepopulates these two fields with 7 and 5 by default respectively. {{}} -**Behavioral strategy** +### Strategy Settings -![behavioral-strategy](/images/docs/devops-user-guide/using-devops/pipeline-settings/behavioral-strategy.png) - -In **Behavioral strategy**, KubeSphere offers four strategies by default. As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch. +In **Strategy Settings**, KubeSphere offers four strategies by default. As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch. **Discover Branches** -- **Exclude branches that are also filed as PRs**. The source branch is not scanned such as the origin’s master branch. These branches need to be merged. -- **Only branches that are also filed as PRs**. Only scan the PR branch. -- **All branches**. Pull all the branches from the repository origin. +- **Exclude branches filed as PRs**. The branches filed as PRs are excluded. +- **Include only branches filed as PRs**. Only pull the branches filed as PRs. +- **Include all branches**. Pull all the branches from the repository. -**Discover Tag Branches** +**Discover Tags** -- **Enable discovery of Tag branch**. The branch with a specific tag will be scanned. -- **Disable the discovery of the Tag branch**. 
The branch with a specific tag will not be scanned. +- **Enable tag discovery**. The branch with a specific tag is scanned. +- **Disable tag discovery**. The branch with a specific tag is not scanned. -**Discover PR from Origin** +**Discover PRs from Origin** -- **Source code version of PR merged with target branch**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. -- **Source code version of PR itself**. A pipeline is created and runs based on the source code of the PR itself. -- **Two pipelines are created when a PR is discovered**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. +- **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. +- **Pull the code at the point of the PR**. A pipeline is created and runs based on the source code of the PR itself. +- **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. -**Discover PR from Forks** +**Discover PRs from Forks** -- **Source code version of PR merged with target branch**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. -- **Source code version of PR itself**. A pipeline is created and runs based on the source code of the PR itself. -- **Two pipelines are created when a PR is discovered**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. +- **Pull the code with the PR merged**. A pipeline is created and runs based on the source code after the PR is merged into the target branch. +- **Pull the code at the point of the PR**. 
A pipeline is created and runs based on the source code of the PR itself. +- **Create two pipelines respectively**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself. - **Contributors**. The users who make contributions to the PR. - **Everyone**. Every user who can access the PR. -- **From users with Admin or Write permission**. Only from users with Admin or Write permission to the PR. -- **Nobody**. If you select this option, no PR will be discovered despite the option you select in **Pull Strategy**. +- **Users with the admin or write permission**. Only from users with the admin or write permission to the PR. +- **None**. If you select this option, no PR will be discovered despite the option you select in **Pull Strategy**. -**Script Path** +### Filter by Regex -![script-path](/images/docs/devops-user-guide/using-devops/pipeline-settings/script-path.png) +Select the checkbox to specify a regular expression to filter branches, PRs, and tags. -The field of **Script Path** specifies the Jenkinsfile path in the code repository. It indicates the repository’s root directory. If the file location changes, the script path also needs to be changed. +### Script Path -**Scan Repo Trigger** +The **Script Path** parameter specifies the Jenkinsfile path in the code repository. It indicates the repository’s root directory. If the file location changes, the script path also needs to be changed. -![scan-repo-trigger](/images/docs/devops-user-guide/using-devops/pipeline-settings/scan-repo-trigger.png) +### Scan Trigger -You can check **Enable regular expressions, ignoring names that do not match the provided regular expression (including branches and PRs)** to specify a regular expression as the trigger for scanning the code repository. +Select **Scan periodically** and set the scan interval from the drop-down list. 
-You can also check **If not, scan regularly** and set the scan interval from the drop-down list. +### Build Trigger -**Build Trigger** +You can select a pipeline from the drop-down list for **Trigger on Pipeline Creation** and **Trigger on Pipeline Deletion** so that when a new pipeline is created or a pipeline is deleted, the tasks in the specified pipeline can be automatically triggered. -![build-trigger](/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger.png) +### Clone Settings -You can select a pipeline from the drop-down list for **When Create Pipeline** and **When Delete Pipeline** so that when a new pipeline is created or a pipeline is deleted, the tasks in the specified pipeline can be automatically triggered. +- **Clone Depth**. The number of commits to fetch when you clone. +- **Clone Timeout Period (min)**. The number of minutes before which the cloning process has to complete. +- **Enable shallow clone**. Enable the shallow clone or not. If you enable it, the cloned code will not contain tags. -**Git Clone Options** +### Webhook -![git-clone-options](/images/docs/devops-user-guide/using-devops/pipeline-settings/git-clone-options.png) +**Webhook** is an efficient way to allow pipelines to discover changes in the remote code repository and automatically trigger a new run. Webhook should be the primary method to trigger Jenkins automatic scanning for GitHub and Git (for example, GitLab). -- **clone depth**. The number of commits to fetch when you clone. -- **Pipeline clone timeout (in minutes)**. The number of minutes before which the cloning process has to complete. -- **Whether to enable shallow clone**. Enable the shallow clone or not. If you enable it, the codes cloned will not contain tags. 
+## Advanced Settings with No Code Repository Specified -**Webhook Push** +If you do not specify a code repository, you can customize the following configurations on the **Advanced Settings** tab: -![webhook-push](/images/docs/devops-user-guide/using-devops/pipeline-settings/webhook-push.png) +### Build Settings -**Webhook Push** is an efficient way to allow pipelines to discover changes in the remote code repository and automatically trigger a new running. Webhook should be the primary method to trigger Jenkins automatic scanning for GitHub and Git (for example, GitLab). +**Delete outdated build records**. Determine when the build records under the branch are deleted. The build record includes the console output, archived artifacts, and other metadata related to a particular build. Keeping fewer builds saves disk space used by Jenkins. KubeSphere provides two options to determine when old builds are deleted: -### Advanced Settings with No Code Repository Selected +- **Build Record Retention Period (days)**. Build records that exceed the retention period are deleted. -If you don't select a code repository, you can customize the following configurations on the **Advanced Settings** tab: - -**Build Settings** - -![build-settings](/images/docs/devops-user-guide/using-devops/pipeline-settings/build-settings.png) - -**Discard old builds** determines when the build records under the branch will be discarded. The build record includes the console output, archived artifacts, and other metadata related to a particular build. Keeping fewer builds saves disk space used by Jenkins. KubeSphere provides two options to determine when old builds are discarded: - -- **Days to keep build**. The build will be discarded after a certain number of days. - -- **Maximum number of builds to keep**. If the existing number of builds exceeds the maximum number, the oldest build will be discarded. +- **Maximum Build Records**. 
When the number of build records exceeds the maximum number, the earliest build record is deleted. {{< notice note >}} @@ -180,20 +152,15 @@ If you don't select a code repository, you can customize the following configura {{}} -- **No concurrent builds**. If you check this option, you cannot run multiple builds concurrently. +- **No concurrent builds**. If you select this option, you cannot run multiple builds concurrently. -**Parametric Build** +### Build Parameters -![parametric-build](/images/docs/devops-user-guide/using-devops/pipeline-settings/parametric-build.png) +The parameterized build process allows you to pass in one or more parameters when you start to run a pipeline. KubeSphere provides five types of parameters by default, including **String**, **Multi-line string**, **Boolean**, **Options**, and **Password**. When you parameterize a project, the build is replaced with a parameterized build, which prompts the user to enter a value for each defined parameter. -The parameterized build process allows you to pass in one or more parameters when you start to run a pipeline. KubeSphere provides five types of parameters by default, including **String**, **Text**, **Boolean**, **Choice**, and **Password**. When you parameterize a project, the build is replaced with a parameterized build, which prompts the user to enter a value for each defined parameter. +### Build Trigger -**Build Trigger** - -![build-trigger-2](/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger-2.png) - -- **Scheduled build**. It enables builds with a specified schedule. You can click **CRON** to refer to the detailed cron syntax. -- **Trigger a remote build (for example, using a script)**. If you need to access a predefined URL to remotely trigger the build, you have to check it and provide an authentication token so that only the user who has the token can remotely trigger the build. +**Build periodically**. It enables builds with a specified schedule. 
Click **Learn More** to see the detailed CRON syntax. diff --git a/content/en/docs/devops-user-guide/how-to-use/pipeline-webhook.md b/content/en/docs/devops-user-guide/how-to-use/pipeline-webhook.md index 3b69f5757..c91f7017f 100644 --- a/content/en/docs/devops-user-guide/how-to-use/pipeline-webhook.md +++ b/content/en/docs/devops-user-guide/how-to-use/pipeline-webhook.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to trigger a pipeline by using a webhook. ## Prerequisites - You need to [enable the KubeSphere DevOps system](../../../pluggable-components/devops/). -- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account needs to be invited to the DevOps project and assigned the `operator` role. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. +- You need to create a workspace, a DevOps project, and a user (`project-regular`). This user needs to be invited to the DevOps project and assigned the `operator` role. See [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready. - You need to create a Jenkinsfile-based pipeline from a remote code repository. For more information, see [Create a Pipeline Using a Jenkinsfile](../create-a-pipeline-using-jenkinsfile/). @@ -23,69 +23,45 @@ This tutorial demonstrates how to trigger a pipeline by using a webhook. 1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click a pipeline (for example, `jenkins-in-scm`) to go to its details page. -2. Click **More** and select **Edit Config** in the drop-down list. +2. Click **More** and select **Edit Settings** in the drop-down list. - ![edit-config](/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-config.png) - -3. In the displayed dialog box, scroll down to **Webhook Push** to obtain the webhook push URL. 
- - ![webhook-push](/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-push.png) +3. In the displayed dialog box, scroll down to **Webhook** to obtain the webhook push URL. ### Set a webhook in the GitHub repository -1. Log in to GitHub and go to your own repository `devops-java-sample`. +1. Log in to GitHub and go to your own repository `devops-maven-sample`. 2. Click **Settings**, click **Webhooks**, and click **Add webhook**. - ![click-add-webhook](/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-add-webhook.png) - 3. Enter the webhook push URL of the pipeline for **Payload URL** and click **Add webhook**. This tutorial selects **Just the push event** for demonstration purposes. You can make other settings based on your needs. For more information, see [the GitHub document](https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks). - ![add-webhook](/images/docs/devops-user-guide/using-devops/pipeline-webhook/add-webhook.png) - 4. The configured webhook is displayed on the **Webhooks** page. - ![webhook-ready](/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-ready.png) - ## Trigger the Pipeline by Using the Webhook ### Submit a pull request to the repository -1. On the **Code** page of your own repository, click **master** and then select **sonarqube**. - - ![click-sonar](/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-sonar.png) +1. On the **Code** page of your own repository, click **master** and then select the **sonarqube** branch. 2. Go to `/deploy/dev-ol/` and click the file `devops-sample.yaml`. - ![click-file](/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-file.png) - 3. Click to edit the file. For example, change the value of `spec.replicas` to `3`. - ![edit-file](/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-file.png) - 4. Click **Commit changes** at the bottom of the page. ### Check the webhook deliveries 1. 
On the **Webhooks** page of your own repository, click the webhook. - ![webhook-ready](/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-ready.png) - 2. Click **Recent Deliveries** and click a specific delivery record to view its details. - ![delivery-detail](/images/docs/devops-user-guide/using-devops/pipeline-webhook/delivery-detail.png) - ### Check the pipeline 1. Log in to the KubeSphere web console as `project-regular`. Go to your DevOps project and click the pipeline. -2. On the **Activity** tab, check that a new run is triggered by the pull request submitted to the `sonarqube` branch of the remote repository. - - ![pipeline-triggered](/images/docs/devops-user-guide/using-devops/pipeline-webhook/pipeline-triggered.png) +2. On the **Run Records** tab, check that a new run is triggered by the pull request submitted to the `sonarqube` branch of the remote repository. 3. Go to the **Pods** page of the project `kubesphere-sample-dev` and check the status of the 3 Pods. If the status of the 3 Pods is running, the pipeline is running properly. - ![pods](/images/docs/devops-user-guide/using-devops/pipeline-webhook/pods.png) - diff --git a/content/en/docs/devops-user-guide/how-to-use/set-ci-node.md b/content/en/docs/devops-user-guide/how-to-use/set-ci-node.md index fe72badaa..660579e26 100644 --- a/content/en/docs/devops-user-guide/how-to-use/set-ci-node.md +++ b/content/en/docs/devops-user-guide/how-to-use/set-ci-node.md @@ -12,29 +12,23 @@ This tutorial demonstrates how to set CI nodes so that KubeSphere schedules task ## Prerequisites -You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +You need a user granted a role including the permission of **Cluster Management**. 
For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. ## Label a CI Node -1. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. If you have enabled the [multi-cluster feature](../../../multicluster-management/) with Member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly. -3. Navigate to **Cluster Nodes** under **Node Management**, where you can see existing nodes in the current cluster. +3. Navigate to **Cluster Nodes** under **Nodes**, where you can see existing nodes in the current cluster. - ![Node Management](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-1.png) +4. Select a node from the list to run CI tasks. Click the node name to go to its details page. Click **More** and select **Edit Labels**. -4. Choose a node from the list to run CI tasks. For example, select `node2` here and click it to go to its detail page. Click **More** and select **Edit Labels**. - - ![Select CI Node](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-2.png) - -5. In the dialog that appears, you can see a label with the key `node-role.kubernetes.io/worker`. Enter `ci` for its value and click **Save**. - - ![Add CI Label](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-3.png) +5. In the displayed dialog box, you can see a label with the key `node-role.kubernetes.io/worker`. Enter `ci` for its value and click **Save**. {{< notice note >}} - You can also click **Add Labels** to add new labels based on your needs. + You can also click **Add** to add new labels based on your needs. 
{{}} @@ -42,18 +36,12 @@ You need an account granted a role including the permission of **Cluster Managem Basically, pipelines and S2I/B2I workflows will be scheduled to this node according to [node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity). If you want to make the node a dedicated one for CI tasks, which means other workloads are not allowed to be scheduled to it, you can add a [taint](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to it. -1. Click **More** and select **Taint Management**. +1. Click **More** and select **Edit Taints**. - ![Select CI Node](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-2.png) - -2. Click **Add Taint** and enter a key `node.kubernetes.io/ci` without specifying a value. You can choose `NoSchedule` or `PreferNoSchedule` based on your needs. - - ![Add Taint](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-4.png) +2. Click **Add Taint** and enter a key `node.kubernetes.io/ci` without specifying a value. You can choose `Prevent scheduling`, `Prevent scheduling if possible`, or `Prevent scheduling and evict existing Pods` based on your needs. 3. Click **Save**. KubeSphere will schedule tasks according to the taint you set. You can go back to work on your DevOps pipeline now. - ![Taint Result](/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-5.png) - {{< notice tip >}} This tutorial also covers the operation related to node management. For detailed information, see [Node Management](../../../cluster-administration/nodes/). 
diff --git a/content/en/docs/devops-user-guide/how-to-use/use-pipeline-templates.md b/content/en/docs/devops-user-guide/how-to-use/use-pipeline-templates.md index 8bf734029..3a150b79f 100644 --- a/content/en/docs/devops-user-guide/how-to-use/use-pipeline-templates.md +++ b/content/en/docs/devops-user-guide/how-to-use/use-pipeline-templates.md @@ -6,9 +6,9 @@ linkTitle: "Use Pipeline Templates" weight: 11290 --- -KubeSphere offers a graphical editing panel where the stages and steps of a Jenkins pipeline can be defined through interactive operations. In KubeSphere v3.1, two built-in pipeline templates are provided as frameworks of continuous integration (CI) and continuous delivery (CD). +KubeSphere offers a graphical editing panel where the stages and steps of a Jenkins pipeline can be defined through interactive operations. In KubeSphere 3.2.1, two built-in pipeline templates are provided as frameworks of continuous integration (CI) and continuous delivery (CD). -When you have a pipeline created in your DevOps project on KubeSphere, you can click the pipeline to go to its detail page, and then click **Edit Pipeline** to select a pipeline template based on your needs. This document illustrates the concept of these two pipeline templates. +When you have a pipeline created in your DevOps project on KubeSphere, you can click the pipeline to go to its details page, and then click **Edit Pipeline** to select a pipeline template based on your needs. This document illustrates the concept of these two pipeline templates. 
## CI Pipeline Template diff --git a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md index 6104d58ab..cc9df9f5e 100644 --- a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md +++ b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md @@ -10,33 +10,25 @@ This tutorial demonstrates how to create and manage DevOps projects. ## Prerequisites -- You need to create a workspace and an account (`project-admin`). The account must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to enable the [KubeSphere DevOps system](../../../pluggable-components/devops/). ## Create a DevOps Project 1. Log in to the console of KubeSphere as `project-admin`. Go to **DevOps Projects** and click **Create**. - ![devops-project-create](/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create.png) - 2. Provide the basic information for the DevOps project and click **OK**. - ![create-devops](/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops.png) - - **Name**: A concise and clear name for this DevOps project, which is convenient for users to identify, such as `demo-devops`. - **Alias**: The alias name of the DevOps project. 
- **Description**: A brief introduction to the DevOps project. - **Cluster Settings**: In the current version, a DevOps project cannot run across multiple clusters at the same time. If you have enabled [the multi-cluster feature](../../../multicluster-management/), you must select the cluster where your DevOps project runs. -3. A DevOps project will appear in the list below after created. - - ![devops-list](/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list.png) +3. A DevOps project is displayed in the list below after it is created. ## View a DevOps Project -Click the DevOps project just created to go to its detail page. Tenants with different permissions are allowed to perform various tasks in a DevOps project, including creating CI/CD pipelines and credentials, and managing accounts and roles. - -![devops-detail-page](/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page.png) +Click the DevOps project just created to go to its details page. Tenants with different permissions are allowed to perform various tasks in a DevOps project, including creating CI/CD pipelines and credentials, and managing users and roles. ### Pipelines @@ -52,8 +44,6 @@ Similar to a project, a DevOps project also requires users to be granted differe ## Edit or Delete a DevOps Project -1. Click **Basic Information** under **Project Management**, and you can see an overview of the current DevOps project, including the number of project roles and members, project name and project creator. +1. Click **Basic Information** under **DevOps Project Settings**, and you can see an overview of the current DevOps project, including the number of project roles and members, project name and project creator. -2. Click **Project Management** on the right, and you can edit the basic information of the DevOps project or delete it.
- - ![project-basic-info](/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info.png) \ No newline at end of file +2. Click **Manage** on the right, and you can edit the basic information of the DevOps project or delete it. diff --git a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md index 39c5532a3..da0eacb6f 100644 --- a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md +++ b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md @@ -8,9 +8,9 @@ weight: 11110 DevOps is a set of practices and tools that automate the processes between IT and software development teams. Among other things, as agile software development sees increasing popularity, continuous integration (CI) and continuous delivery (CD) have become an ideal solution in this connection. In a CI/CD workflow, every integration is tested through automatic building, including coding, releasing and testing. This helps developers to identify any integration errors beforehand and teams can deliver internal software to a production environment with speed, security, and reliability. -Nevertheless, the traditional master-agent architecture of Jenkins (i.e. multiple agents work for a master) has the following shortcomings. +Nevertheless, the traditional controller-agent architecture of Jenkins (i.e. multiple agents work for a controller) has the following shortcomings. -- The entire CI/CD pipeline will crash once the master goes down. +- The entire CI/CD pipeline will crash once the controller goes down. - Resources are not allocated equally as some agents see pipeline jobs wait in queue while others remain idle. - Different agents may be configured in different environments and require different coding languages. 
The disparity can cause inconvenience in management and maintenance. @@ -29,15 +29,11 @@ The KubeSphere DevOps system provides you with the following features: - [Graphical editing panels](../../../devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel/) to create pipelines with a low learning curve. - A powerful tool integration mechanism such as [SonarQube](../../../devops-user-guide/how-to-integrate/sonarqube/) for code quality check. -![pipeline-list](/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list.png) - -![sonarqube-result-detail](/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.jpg) - ### KubeSphere CI/CD pipeline workflows -A KubeSphere CI/CD pipeline runs on the back of the underlying Kubernetes Jenkins agents. These Jenkins agents can be dynamically scaled as they are dynamically provisioned or released based on the job status. The Jenkins master and agents run as Pods on KubeSphere nodes. The master runs on one of the nodes with its configuration data stored in a volume. Agents run across nodes while they may not be active all the time because they are created dynamically and deleted automatically as needed. +A KubeSphere CI/CD pipeline runs on the back of the underlying Kubernetes Jenkins agents. These Jenkins agents can be dynamically scaled as they are dynamically provisioned or released based on the job status. The Jenkins controller and agents run as Pods on KubeSphere nodes. The controller runs on one of the nodes with its configuration data stored in a volume. Agents run across nodes while they may not be active all the time because they are created dynamically and deleted automatically as needed. -When the Jenkins master receives a building request, it dynamically creates Jenkins agents that run in Pods according to labels. At the same time, Jenkins agents will be registered in the master. 
After agents finish their jobs, they will be released and related Pods will be deleted as well. +When the Jenkins controller receives a build request, it dynamically creates Jenkins agents that run in Pods according to labels. At the same time, Jenkins agents will be registered in the controller. After agents finish their jobs, they will be released and related Pods will be deleted as well. ### Dynamically provision Jenkins agents @@ -47,4 +43,4 @@ The advantages of dynamically provisioning Jenkins agents are: **High scalability**. When a KubeSphere cluster has insufficient resources which lead to long waiting time of jobs in the queue, you can add new nodes to the cluster. -**High availability**. When a Jenkins master fails, KubeSphere automatically creates a new Jenkins master container with the volume mounted to the new container. In this way, the data are secured with high availability achieved for the cluster. +**High availability**. When a Jenkins controller fails, KubeSphere automatically creates a new Jenkins controller container with the volume mounted to the new container. In this way, the data are secured with high availability achieved for the cluster. diff --git a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md index 4ea78a9e6..0f30c6567 100644 --- a/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md +++ b/content/en/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md @@ -17,11 +17,11 @@ In DevOps project scope, you can grant the following resources' permissions to a ## Prerequisites -At least one DevOps project has been created, such as `demo-devops`. Besides, you need an account of the `admin` role (for example, `devops-admin`) at the DevOps project level.
+At least one DevOps project has been created, such as `demo-devops`. Besides, you need a user of the `admin` role (for example, `devops-admin`) at the DevOps project level. ## Built-in Roles -In **Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a DevOps project is created and they cannot be edited or deleted. +In **DevOps Project Roles**, there are three available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a DevOps project is created and they cannot be edited or deleted. | Built-in Roles | Description | | ------------------ | ------------------------------------------------------------ | @@ -35,27 +35,21 @@ In **Project Roles**, there are three available built-in roles as shown below. B {{< notice note >}} - The account `devops-admin` is used as an example. As long as the account you are using is granted a role including the permissions of **Project Member Viewing**, **Project Role Management** and **Project Role Viewing** in **Access Control** at DevOps project level, it can create a DevOps project role. + The account `devops-admin` is used as an example. As long as the account you are using is granted a role including the permissions of **Member Viewing**, **Role Management** and **Role Viewing** in **Access Control** at DevOps project level, it can create a DevOps project role. {{}} -2. Go to **Project Roles** in **Project Management**, click **Create** and set a **Name**. In this example, a role named `pipeline-creator` will be created. Click **Edit Permissions** to continue. - - ![devops_role_step1](/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step1.png) +2. Go to **DevOps Project Roles** in **DevOps Project Settings**, click **Create** and set a **Name**. In this example, a role named `pipeline-creator` will be created. Click **Edit Permissions** to continue. 3. 
In **Pipeline Management**, select the permissions that you want this role to contain. For example, **Pipeline Management** and **Pipeline Viewing** are selected for this role. Click **OK** to finish. - ![devops_role_step2](/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step2.png) - {{< notice note >}} **Depends on** means the major permission (the one listed after **Depends on**) needs to be selected first so that the affiliated permission can be assigned. {{}} -4. Newly created roles will be listed in **Project Roles**. You can click on the right to edit it. - - ![devops_role_list](/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_list.png) +4. Newly created roles will be listed in **DevOps Project Roles**. You can click on the right to edit it. {{< notice note >}} @@ -65,11 +59,9 @@ In **Project Roles**, there are three available built-in roles as shown below. B ## Invite a New Member -1. In **Project Management**, select **Project Members** and click **Invite Member**. +1. In **DevOps Project Settings**, select **DevOps Project Members** and click **Invite**. -2. Click to invite an account to the DevOps project. Grant the role of `pipeline-creator` to the account. - - ![devops_invite_member](/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_invite_member.png) +2. Click to invite a user to the DevOps project. Grant the role of `pipeline-creator` to the account. {{< notice note >}} @@ -77,9 +69,8 @@ In **Project Roles**, there are three available built-in roles as shown below. B {{}} -3. After you add a user to the DevOps project, click **OK**. In **Project Members**, you can see the newly invited member listed. +3. After you add a user to the DevOps project, click **OK**. In **DevOps Project Members**, you can see the newly invited member listed. 4. 
You can also change the role of an existing member by editing it or remove it from the DevOps project. - ![devops_user_edit](/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_user_edit.png) diff --git a/content/en/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md b/content/en/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md index acefe87ac..3d8c868d1 100644 --- a/content/en/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md +++ b/content/en/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md @@ -12,9 +12,9 @@ This tutorial demonstrates how to add an existing Kubernetes namespace to a Kube ## Prerequisites -- You need an account granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to an account. +- You need a user granted a role including the permission of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the permission and assign it to a user. -- You have an available workspace so that the namespace can be assigned to it. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You have an available workspace so that the namespace can be assigned to it. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a Kubernetes Namespace @@ -28,15 +28,11 @@ For more information about creating a Kubernetes namespace, see [Namespaces Walk ## Add the Namespace to a KubeSphere Workspace -1. Log in to the KubeSphere console as `admin` and go to the **Cluster Management** page. Click **Projects**, and you can see all your projects (i.e. 
namespaces) running on the current cluster, including the one just created. +1. Log in to the KubeSphere console as `admin` and go to the **Cluster Management** page. Click **Projects**, and you can see all your projects running on the current cluster, including the one just created. 2. The namespace created through kubectl does not belong to any workspace. Click on the right and select **Assign Workspace**. - ![project-page](/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/project-page.png) - -3. In the dialog that appears, select a **Target Workspace** and a **Project Manager** for the project and click **OK**. +3. In the dialog that appears, select a **Workspace** and a **Project Administrator** for the project and click **OK**. 4. Go to your workspace and you can see the project on the **Projects** page. - ![workspace-project](/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/workspace-project.png) - diff --git a/content/en/docs/faq/access-control/cannot-login.md b/content/en/docs/faq/access-control/cannot-login.md index 2837a78bf..804e6e53c 100644 --- a/content/en/docs/faq/access-control/cannot-login.md +++ b/content/en/docs/faq/access-control/cannot-login.md @@ -1,22 +1,22 @@ --- -title: "Account Login Failure" -keywords: "login failure, account is not active, KubeSphere, Kubernetes" +title: "User Login Failure" +keywords: "login failure, user is not active, KubeSphere, Kubernetes" description: "How to solve the issue of login failure" -linkTitle: "Account Login Failure" +linkTitle: "User Login Failure" Weight: 16440 --- -KubeSphere automatically creates a default account (`admin/P@88w0rd`) when it is installed. An account cannot be used for login if the status is not **Active** or you use an incorrect password. +KubeSphere automatically creates a default user (`admin/P@88w0rd`) when it is installed. 
A user cannot be used for login if the status is not **Active** or you use an incorrect password. -Here are some of the frequently asked questions about account login failure. +Here are some of the frequently asked questions about user login failure. -## Account Not Active +## User Not Active You may see an image below when the login fails. To find out the reason and solve the issue, perform the following steps: ![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/account-not-active.png) -1. Execute the following command to check the status of your account. +1. Execute the following command to check the status of the user. ```bash $ kubectl get users @@ -88,7 +88,7 @@ kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spe ![incorrect-password](/images/docs/faq/access-control-and-account-management/cannot-login/wrong-password.png) -Run the following command to verify that the account and the password are correct. +Run the following command to verify that the username and the password are correct. ``` curl -u : "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes" @@ -139,24 +139,3 @@ You need to restore Redis and make sure it is running normally with good network ``` kubectl -n kubesphere-system rollout restart deploy ks-console ``` - - -## Unable to Log in through a Third Party Account after Upgrading - -![forbidden](/images/docs/faq/access-control-and-account-management/cannot-login/forbidden.jpg) - -```js -{ - code: 403, - kind: 'Status', - apiVersion: 'v1', - metadata: {}, - status: 'Failure', - message: 'users.iam.kubesphere.io is forbidden: User "system:pre-registration" cannot create resource "users" in API group "iam.kubesphere.io" at the cluster scope', - reason: 'Forbidden', - details: { group: 'iam.kubesphere.io', kind: 'users' }, - statusText: 'Forbidden' -} -``` - -This is a bug in the process of upgrading from V3.0.0 to v3.1.0. 
For more information about related issues and solutions, see https://github.com/kubesphere/kubesphere/issues/3850. \ No newline at end of file diff --git a/content/en/docs/faq/access-control/forgot-password.md b/content/en/docs/faq/access-control/forgot-password.md index 527ed01bb..fa2bec3b4 100644 --- a/content/en/docs/faq/access-control/forgot-password.md +++ b/content/en/docs/faq/access-control/forgot-password.md @@ -8,9 +8,15 @@ Weight: 16410 ## Reset the Password of a Regular User -The administrator who has the permission to manage users can change an account password. On the **Accounts** page, click the account of which you need to change the password. On the detail page, select **Change Password** from the **More** drop-down list. +1. Log in to the KubeSphere web console using the administrator who has the permission to manage users. -![modify-password](/images/docs/faq/forgot-password/modify-password.png) +2. Click **Platform** on the upper-left corner and select **Access Control**. Click **Users**. + +3. On the **Users** page, click the user of which you need to change the password to visit its details page. + +4. On the details page, click **More**, and then select **Change Password** from the drop-down list. + +5. On the displayed dialog box, enter a new password and confirm the password. Click **OK** after finished. ## Reset the Administrator Password @@ -22,6 +28,6 @@ kubectl patch users -p '{"spec":{"password":""}}' --typ {{< notice note >}} -Make sure you replace `` and `` with the account and the new password in the command before you run it. +Make sure you replace `` and `` with the username and the new password in the command before you run it. 
{{}} \ No newline at end of file diff --git a/content/en/docs/faq/applications/remove-built-in-apps.md b/content/en/docs/faq/applications/remove-built-in-apps.md index c176b8bd0..7b2afcda6 100644 --- a/content/en/docs/faq/applications/remove-built-in-apps.md +++ b/content/en/docs/faq/applications/remove-built-in-apps.md @@ -6,35 +6,27 @@ linkTitle: "Remove Built-in Apps in KubeSphere" Weight: 16910 --- -As an open source and app-centric container platform, KubeSphere integrates 17 built-in apps in the App Store that is based on [OpenPitrix](https://github.com/openpitrix/openpitrix). They are accessible to all tenants in a workspace, while you can also remove them from the App Store. This tutorial demonstrates how to remove a built-in app from the App Store. +As an open source and app-centric container platform, KubeSphere integrates apps in the App Store that is based on [OpenPitrix](https://github.com/openpitrix/openpitrix). They are accessible to all tenants in a workspace, while you can also remove them from the App Store. This tutorial demonstrates how to remove a built-in app from the App Store. ## Prerequisites -- You need to use an account with the role of `platform-admin` (for example, `admin`) for this tutorial. +- You need to use a user with the role of `platform-admin` (for example, `admin`) for this tutorial. - You need to [enable the App Store](../../../pluggable-components/app-store/). ## Remove a Built-in App 1. Log in to the web console of KubeSphere as `admin`, click **Platform** in the upper-left corner, and then select **App Store Management**. -2. On the **App Store** page, you can see all 17 built-in apps in the list. Select an app that you want to remove from the App Store. For example, click **Tomcat** to go to its detail page. - - ![click-tomcat](/images/docs/faq/applications/remove-built-in-apps/click_tomcat.png) +2. On the **Apps** page, you can see all apps in the list. Select an app that you want to remove from the App Store. 
For example, click **Tomcat** to go to its detail page. 3. On the detail page of Tomcat, click **Suspend App** to remove the app. - ![suspend-tomcat](/images/docs/faq/applications/remove-built-in-apps/suspend_tomcat.png) - -4. In the dialog that appears, click **OK** to confirm your operation. - - ![confirm-suspend](/images/docs/faq/applications/remove-built-in-apps/confirm_suspend.png) +4. In the displayed dialog box, click **OK** to confirm your operation. 5. To make the app available again in the App Store, click **Activate App** and then click **OK** to confirm your operation. - ![activate-tomcat](/images/docs/faq/applications/remove-built-in-apps/activate_tomcat.png) - {{< notice note >}} - You can also create a new account with necessary roles based on your needs. For more information about managing apps in KubeSphere, refer to [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). + You can also create a new user with necessary roles based on your needs. For more information about managing apps in KubeSphere, refer to [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). {{}} \ No newline at end of file diff --git a/content/en/docs/faq/applications/reuse-the-same-app-name-after-deletion.md b/content/en/docs/faq/applications/reuse-the-same-app-name-after-deletion.md deleted file mode 100644 index 85c3837d3..000000000 --- a/content/en/docs/faq/applications/reuse-the-same-app-name-after-deletion.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Reuse the Same App Name after Its Deletion" -keywords: "KubeSphere, OpenPitrix, Application, App" -description: "Learn how to reuse the same app name after its deletion." -linkTitle: "Reuse the Same App Name after Its Deletion" -Weight: 16920 ---- - -To deploy an app in KubeSphere, tenants can go to the App Store and select the available app based on their needs. 
However, tenants could experience errors when deploying an app with the same app name as that of the deleted one. This tutorial demonstrates how to use the same app name after its deletion. - -## Prerequisites - -- You need to use an account invited to your project with the role of `operator`. This tutorial uses the account `project-regular` for demonstration purposes. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). -- You need to [enable the App Store](../../../pluggable-components/app-store/). - -## Reuse the Same App Name - -### Deploy an app from the App Store - -1. Log in to the web console of KubeSphere as `project-regular` and deploy an app from the App Store. This tutorial uses Redis as an example app and set the app name as `redis-1`. For more information about how to deploy Redis, refer to [Deploy Redis on KubeSphere](../../../application-store/built-in-apps/redis-app/). - - ![redis-1](/images/docs/faq/applications/use-the-same-app-name-after-deletion/redis-1.PNG) - -2. Click the app to go to its detail page, and then click **Delete** to delete it. - - ![delete-redis-1](/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-redis-1.PNG) - -### Reuse the same app name - -1. If you try to deploy a new Redis app with the same app name as `redis-1`, you can see the following error prompt in the upper-right corner. - - ![error-prompt](/images/docs/faq/applications/use-the-same-app-name-after-deletion/error-prompt.PNG) - -2. In your project, go to **Secrets** under **Configurations**, and enter `redis-1` in the search bar to search the Secret. - - ![search-secret](/images/docs/faq/applications/use-the-same-app-name-after-deletion/search-secret.PNG) - -3. Click the Secret to go to its detail page, and click **More** to select **Delete** from the drop-down menu. 
- - ![delete-secret](/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-secret.PNG) - -4. In the dialog that appears, enter the Secret name and click **OK** to delete it. - - ![confirm-delete](/images/docs/faq/applications/use-the-same-app-name-after-deletion/confirm-delete.PNG) - -5. Now, you can deploy a new Redis app with the same app name as `redis-1`. - - ![new-redis-app](/images/docs/faq/applications/use-the-same-app-name-after-deletion/new-redis-app.PNG) diff --git a/content/en/docs/faq/console/change-console-language.md b/content/en/docs/faq/console/change-console-language.md index 361d70162..dd073eb63 100644 --- a/content/en/docs/faq/console/change-console-language.md +++ b/content/en/docs/faq/console/change-console-language.md @@ -6,7 +6,7 @@ linkTitle: "Change the Console Language" Weight: 16530 --- -The KubeSphere web console is currently available in four languages: Simplified Chinese, Traditional Chinese, English and Spanish. +The KubeSphere web console is currently available in four languages: Simplified Chinese, Traditional Chinese, English, and Spanish. This tutorial demonstrates how to change the language of the console. @@ -16,10 +16,10 @@ You have installed KubeSphere. ## Change the Console Language -1. Log in to KubeSphere with your account and click the account name in the upper-right corner. +1. Log in to KubeSphere and click the username in the upper-right corner. 2. In the drop-down list, select **User Settings**. 3. On the **Basic Information** page, select a desired language from the **Language** drop-down list. -4. Click to save it. \ No newline at end of file +4. Click to save it. 
\ No newline at end of file diff --git a/content/en/docs/faq/console/edit-resources-in-system-workspace.md b/content/en/docs/faq/console/edit-resources-in-system-workspace.md index 6bd84e32f..f47636f98 100644 --- a/content/en/docs/faq/console/edit-resources-in-system-workspace.md +++ b/content/en/docs/faq/console/edit-resources-in-system-workspace.md @@ -18,7 +18,7 @@ Editing resources in `system-workspace` may cause unexpected results, such as Ku ## Edit the Console Configuration -1. Log in to KubeSphere as `admin`. Click the hammer icon in the bottom-right corner and select **Kubectl**. +1. Log in to KubeSphere as `admin`. Click in the lower-right corner and select **Kubectl**. 2. Execute the following command: @@ -31,9 +31,9 @@ Editing resources in `system-workspace` may cause unexpected results, such as Ku ```yaml client: version: - kubesphere: v3.0.0 - kubernetes: v1.17.9 - openpitrix: v0.3.5 + kubesphere: v3.2.1 + kubernetes: v1.21.5 + openpitrix: v3.2.1 enableKubeConfig: true systemWorkspace: "$" # Add this line manually. ``` diff --git a/content/en/docs/faq/devops/create-devops-kubeconfig-on-aws.md b/content/en/docs/faq/devops/create-devops-kubeconfig-on-aws.md index 9a4a27f61..c7827b981 100644 --- a/content/en/docs/faq/devops/create-devops-kubeconfig-on-aws.md +++ b/content/en/docs/faq/devops/create-devops-kubeconfig-on-aws.md @@ -77,9 +77,7 @@ If you have trouble deploying applications into your project when running a pipe ### Step 3: Create a DevOps kubeconfig -1. Log in to your KubeSphere console of the AWS cluster and go to your DevOps project. Go to **Credentials** under **Project Management**, and then click **Create**. You can name this kubeconfig based on your needs. - - ![create-kubeconfig](/images/docs/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png) +1. Log in to your KubeSphere console of the AWS cluster and go to your DevOps project. Go to **Credentials** under **DevOps Project Settings**, and then click **Create**. 
You can name this kubeconfig based on your needs. 2. In the **Content** text box, pay attention to the following contents: diff --git a/content/en/docs/faq/devops/install-jenkins-plugins.md b/content/en/docs/faq/devops/install-jenkins-plugins.md index 9658f30c0..dfa608ee4 100644 --- a/content/en/docs/faq/devops/install-jenkins-plugins.md +++ b/content/en/docs/faq/devops/install-jenkins-plugins.md @@ -25,7 +25,7 @@ You need to enable [the KubeSphere DevOps system](../../../pluggable-components/ 1. Run the following command to get the address of Jenkins. ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT ``` @@ -38,7 +38,7 @@ You need to enable [the KubeSphere DevOps system](../../../pluggable-components/ {{< notice note >}} - Make sure you use your own address of Jenkins. You may also need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. + Make sure you use your own address of Jenkins. You may also need to open the port in your security groups and configure related port forwarding rules depending on where your KubeSphere cluster is deployed. {{}} @@ -46,16 +46,10 @@ You need to enable [the KubeSphere DevOps system](../../../pluggable-components/ 1. Log in to the Jenkins dashboard and click **Manage Jenkins**. - ![click-manage-jenkins](/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png) - 2. On the **Manage Jenkins** page, scroll down to **Manage Plugins** and click it. - ![click-manage-plugins](/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png) - 3. 
Select the **Available** tab and you have to use the search field to search for the plugins you need. For example, you can enter `git` in the search field, check the checkbox next to the plugin you need, and then click **Install without restart** or **Download now and install after restart** based on your needs. - ![available-plugins](/images/docs/faq/devops/install-plugins-to-jenkins/available-plugins.png) - {{< notice note >}} Jenkins plugins are inter-dependent. You may also need to install dependencies when you install a plugin. @@ -64,16 +58,10 @@ You need to enable [the KubeSphere DevOps system](../../../pluggable-components/ 4. If you downloaded an HPI file in advance, you can also select the **Advanced** tab and upload the HPI file to install it as a plugin. - ![click-advanced-tab](/images/docs/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png) - 5. On the **Installed** tab, you can view all the plugins installed, and the plugins that are safe to uninstall will have the **Uninstall** button shown on the right. - ![installed-plugins](/images/docs/faq/devops/install-plugins-to-jenkins/installed-plugins.png) - 6. On the **Updates** tab, you can install the updates for plugins by checking the checkbox of a plugin and then click the **Download now and install after restart** button. You can also click the **Check now** button to check for updates. 
- ![update-plugins](/images/docs/faq/devops/install-plugins-to-jenkins/update-plugins.png) - ## See Also [Managing Plugins](https://www.jenkins.io/doc/book/managing/plugins/) \ No newline at end of file diff --git a/content/en/docs/faq/installation/configure-booster.md b/content/en/docs/faq/installation/configure-booster.md index 44c63f90c..5c002d0b2 100644 --- a/content/en/docs/faq/installation/configure-booster.md +++ b/content/en/docs/faq/installation/configure-booster.md @@ -10,19 +10,7 @@ If you have trouble downloading images from `dockerhub.io`, it is highly recomme ## Get a Booster URL -To configure the booster, you need a registry mirror address. See the following example to see how you can get a booster URL from Alibaba Cloud. - -1. Log in to the console of Alibaba Cloud and enter "container registry" in the search bar. Click **Container Registry** in the drop-down list as below. - - ![container-registry](/images/docs/installing-on-linux/faq/configure-booster/container-registry.png) - -2. Click **Image Booster**. - - ![image-booster](/images/docs/installing-on-linux/faq/configure-booster/image-booster.png) - -3. You can find the **Booster URL** in the image below as well as the official guide from Alibaba Cloud to help you configure the booster. - - ![booster-url](/images/docs/installing-on-linux/faq/configure-booster/booster-url.png) +To configure the booster, you need a registry mirror address. See how you can [get a booster URL from Alibaba Cloud](https://www.alibabacloud.com/help/doc-detail/60750.htm?spm=a2c63.p38356.b99.18.4f4133f0uTKb8S). 
## Set the Registry Mirror diff --git a/content/en/docs/faq/installation/telemetry.md b/content/en/docs/faq/installation/telemetry.md index c4568ec37..700886a74 100644 --- a/content/en/docs/faq/installation/telemetry.md +++ b/content/en/docs/faq/installation/telemetry.md @@ -29,15 +29,15 @@ Telemetry is enabled by default when you install KubeSphere, while you also have ### Disable Telemetry before installation -When you install KubeSphere on an existing Kubernetes cluster, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) for cluster settings. If you want to disable Telemetry, do not run `kubectl apply -f` directly for this file. +When you install KubeSphere on an existing Kubernetes cluster, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) for cluster settings. If you want to disable Telemetry, do not run `kubectl apply -f` directly for this file. {{< notice note >}} -If you install KubeSphere on Linux, see [Disable Telemetry after Installation](../telemetry/#disable-telemetry-after-installation) directly. +If you install KubeSphere on Linux, see [Disable Telemetry After Installation](../telemetry/#disable-telemetry-after-installation) directly. {{}} -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it: +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it: ```bash vi cluster-configuration.yaml @@ -57,14 +57,14 @@ If you install KubeSphere on Linux, see [Disable Telemetry after Installation](. 3. Save the file and run the following commands to start installation. 
```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` ### Disable Telemetry after installation -1. Log in to the console as `admin` and click **Platform** in the top-left corner. +1. Log in to the console as `admin` and click **Platform** in the upper-left corner. 2. Select **Cluster Management** and navigate to **CRDs**. @@ -76,7 +76,7 @@ If you have enabled [the multi-cluster feature](../../../multicluster-management 4. Click on the right of `ks-installer` and select **Edit YAML**. -5. Scroll down to the bottom of the file, add `telemetry_enabled: false`, and then click **Update**. +5. Scroll down to the bottom of the file, add `telemetry_enabled: false`, and then click **OK**. {{< notice note >}} diff --git a/content/en/docs/faq/multi-cluster-management/manage-multi-cluster.md b/content/en/docs/faq/multi-cluster-management/manage-multi-cluster.md index f10c4c2d0..5bb132e42 100644 --- a/content/en/docs/faq/multi-cluster-management/manage-multi-cluster.md +++ b/content/en/docs/faq/multi-cluster-management/manage-multi-cluster.md @@ -25,7 +25,7 @@ Once you build a multi-cluster environment on KubeSphere, you can manage it thro It is not recommended that you change a Host Cluster to a Member Cluster or the other way round. If a Member Cluster has been imported to a Host Cluster before, you have to use the same cluster name when importing it to a new Host Cluster after unbinding it from the previous Host Cluster. -If you want to import the Member Cluster to a new Host Cluster while retaining existing projects (i.e. namespaces), you can follow the steps as below. +If you want to import the Member Cluster to a new Host Cluster while retaining existing projects, you can follow the steps as below. 1. 
Run the following command on the Member Cluster to unbind the projects to be retained from your workspace. @@ -45,11 +45,11 @@ If you want to import the Member Cluster to a new Host Cluster while retaining e kuebctl label ns kubesphere.io/workspace= ``` -### Account Management +### User Management -The accounts you create through the central control plane from your Host Cluster will be synchronized to Member Clusters. +The users you create through the central control plane from your Host Cluster will be synchronized to Member Clusters. -If you want to let different accounts access different clusters, you can create workspaces and [assign different clusters to them](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/). After that, you can invite different accounts to these workspaces per access requirements for these accounts. In future releases, you will be able to invite accounts to your [multi-cluster projects](../../../project-administration/project-and-multicluster-project/#multi-cluster-projects). +If you want to let different users access different clusters, you can create workspaces and [assign different clusters to them](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/). After that, you can invite different users to these workspaces per access requirements for these users. ### KubeSphere Components Management diff --git a/content/en/docs/faq/observability/byop.md b/content/en/docs/faq/observability/byop.md index 70208867f..f34fe9f61 100644 --- a/content/en/docs/faq/observability/byop.md +++ b/content/en/docs/faq/observability/byop.md @@ -113,7 +113,7 @@ There are a few items listed in [KubeSphere kustomization](https://github.com/ku If your Prometheus stack setup isn't managed by Prometheus Operator, you can skip this step. 
But you have to make sure that: -- You must copy the recording/alerting rules in [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) and [PrometheusRule for ETCD](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) to your Prometheus config for KubeSphere v3.0.0 to work properly. +- You must copy the recording/alerting rules in [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) and [PrometheusRule for etcd](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) to your Prometheus config for KubeSphere v3.0.0 to work properly. - Configure your Prometheus to scrape metrics from the same targets as the ServiceMonitors listed in [KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml). diff --git a/content/en/docs/faq/observability/logging.md b/content/en/docs/faq/observability/logging.md index 0b82de7ab..4073e56d5 100644 --- a/content/en/docs/faq/observability/logging.md +++ b/content/en/docs/faq/observability/logging.md @@ -27,7 +27,7 @@ If you are using the KubeSphere internal Elasticsearch and want to change it to kubectl edit cc -n kubesphere-system ks-installer ``` -2. Comment out `es.elasticsearchDataXXX`, `es.elasticsearchMasterXXX` and `status.logging`, and set `es.externalElasticsearchUrl` to the address of your Elasticsearch and `es.externalElasticsearchPort` to its port number. Below is an example for your reference. +2. Comment out `es.elasticsearchDataXXX`, `es.elasticsearchMasterXXX` and `status.logging`, and set `es.externalElasticsearchHost` to the address of your Elasticsearch and `es.externalElasticsearchPort` to its port number. Below is an example for your reference. 
```yaml apiVersion: installer.kubesphere.io/v1alpha1 @@ -46,7 +46,7 @@ If you are using the KubeSphere internal Elasticsearch and want to change it to # elasticsearchMasterVolumeSize: 4Gi elkPrefix: logstash logMaxAge: 7 - externalElasticsearchUrl: <192.168.0.2> + externalElasticsearchHost: <192.168.0.2> externalElasticsearchPort: <9200> ... status: diff --git a/content/en/docs/faq/upgrade/qingcloud-csi-upgrade.md b/content/en/docs/faq/upgrade/qingcloud-csi-upgrade.md index bad893f6b..3b6254b04 100644 --- a/content/en/docs/faq/upgrade/qingcloud-csi-upgrade.md +++ b/content/en/docs/faq/upgrade/qingcloud-csi-upgrade.md @@ -1,6 +1,6 @@ --- title: "Upgrade QingCloud CSI" -keywords: "Kubernetes, upgrade, KubeSphere, v3.0.0" +keywords: "Kubernetes, upgrade, KubeSphere, v3.2.0" description: "Upgrade the QingCloud CSI after you upgrade KubeSphere." linkTitle: "Upgrade QingCloud CSI" weight: 16210 @@ -19,7 +19,7 @@ cd qingcloud-csi/ ``` ``` -git checkout v1.1.1 +git checkout v1.2.0 ``` ``` diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md index 43569152f..5bfa4ca09 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md @@ -77,9 +77,9 @@ All the other Resources will be placed in `MC_KubeSphereRG_KuberSphereCluster_we To start deploying KubeSphere, use the following commands. 
```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` You can inspect the logs of installation through the following command: diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md index 6a61fe4b9..58a3c7a4c 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md @@ -28,7 +28,7 @@ You need to select: {{< notice note >}} -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). - 2 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. - The machine type Standard / 4 GB / 2 vCPUs is for minimal installation. If you plan to enable several pluggable components or use the cluster for production, you can upgrade your nodes to a more powerfull type (such as CPU-Optimized / 8 GB / 4 vCPUs). It seems that DigitalOcean provisions the master nodes based on the type of the worker nodes, and for Standard ones the API server can become unresponsive quite soon. @@ -45,9 +45,9 @@ Now that the cluster is ready, you can install KubeSphere following the steps be - Install KubeSphere using kubectl. 
The following commands are only for the default minimal installation. ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - Inspect the logs of installation: @@ -106,9 +106,7 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere b {{}} -- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard as shown in the following image. - - ![doks-cluster](/images/docs/do/doks-cluster.png) +- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. ## Enable Pluggable Components (Optional) diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md index 68a7a7b8d..49bdb7ac0 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md @@ -79,7 +79,7 @@ Check the installation with `aws --version`. {{< notice note >}} -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). - 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. 
- The machine type t3.medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources. - For other settings, you can change them as well based on your own needs or use the default value. @@ -125,9 +125,9 @@ We will use the kubectl command-line utility for communicating with the cluster - Install KubeSphere using kubectl. The following commands are only for the default minimal installation. ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - Inspect the logs of installation: @@ -173,9 +173,7 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere b - Access the web console of KubeSphere using the external IP generated by EKS. -- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard as shown in the following image. - - ![eks-cluster](https://ap3.qingstor.com/kubesphere-website/docs/gke-cluster.png) +- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. 
## Enable Pluggable Components (Optional) diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md index 4b83b109a..937792431 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md @@ -30,7 +30,7 @@ This guide walks you through the steps of deploying KubeSphere on [Google Kubern {{< notice note >}} -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). - 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. - The machine type e2-medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources. - For other settings, you can change them as well based on your own needs or use the default value. @@ -46,9 +46,9 @@ This guide walks you through the steps of deploying KubeSphere on [Google Kubern - Install KubeSphere using kubectl. The following commands are only for the default minimal installation. 
```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - Inspect the logs of installation: @@ -99,9 +99,7 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere b {{}} -- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard as shown in the following image. - - ![gke-cluster](https://ap3.qingstor.com/kubesphere-website/docs/gke-cluster.png) +- Log in to the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. ## Enable Pluggable Components (Optional) diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md index dcb3f5cc6..95605431b 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md @@ -14,7 +14,7 @@ This guide walks you through the steps of deploying KubeSphere on [Huaiwei CCE]( First, create a Kubernetes cluster based on the requirements below. -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). 
- Ensure the cloud computing network for your Kubernetes cluster works, or use an elastic IP when you use **Auto Create** or **Select Existing**. You can also configure the network after the cluster is created. Refer to [NAT Gateway](https://support.huaweicloud.com/en-us/productdesc-natgateway/en-us_topic_0086739762.html). - Select `s3.xlarge.2` `4-core|8GB` for nodes and add more if necessary (3 and more nodes are required for a production environment). @@ -76,9 +76,9 @@ For how to set up or cancel a default StorageClass, refer to Kubernetes official Use [ks-installer](https://github.com/kubesphere/ks-installer) to deploy KubeSphere on an existing Kubernetes cluster. Execute the following commands directly for a minimal installation: ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` Go to **Workload** > **Pod**, and check the running status of the pod in `kubesphere-system` of its namespace to understand the minimal deployment of KubeSphere. Check `ks-console-xxxx` of the namespace to understand the availability of KubeSphere console. @@ -99,8 +99,6 @@ Default settings are OK for other detailed configurations. You can also set them After you set LoadBalancer for KubeSphere console, you can visit it via the given address. Go to KubeSphere login page and use the default account (username `admin` and password `P@88w0rd`) to log in. - ![Log in to KubeSphere Console](/images/docs/huawei-cce/en/login-ks-console.png) - ## Enable Pluggable Components (Optional) The example above demonstrates the process of a default minimal installation. 
To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details. @@ -111,6 +109,4 @@ Before you use Istio-based features of KubeSphere, you have to delete `applicati {{}} -After your component is installed, go to the **Cluster Management** page, and you will see the interface below. You can check the status of your component in **Components**. - - ![Full View of KubeSphere Console](/images/docs/huawei-cce/en/view-ks-console-full.png) +After your component is installed, go to the **Cluster Management** page. You can check the status of your component in **System Components**. diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md index 9dc601fd4..98acb2a19 100644 --- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md +++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md @@ -30,7 +30,7 @@ This guide walks you through the steps of deploying KubeSphere on [Oracle Kubern {{< notice note >}} - - To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. + - To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). - It is recommended that you should select **Public** for **Visibility Type**, which will assign a public IP address for every node. The IP address can be used later to access the web console of KubeSphere. - In Oracle Cloud, a Shape is a template that determines the number of CPUs, amount of memory, and other resources that are allocated to an instance. `VM.Standard.E2.2 (2 CPUs and 16G Memory)` is used in this example.
For more information, see [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard). - 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment. @@ -68,9 +68,9 @@ This guide walks you through the steps of deploying KubeSphere on [Oracle Kubern - Install KubeSphere using kubectl. The following commands are only for the default minimal installation. ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - Inspect the logs of installation: @@ -140,9 +140,7 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere e ![console-service](https://ap3.qingstor.com/kubesphere-website/docs/console-service.png) -- Log in to the console through the external IP address with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard shown below: - - ![kubesphere-oke-dashboard](https://ap3.qingstor.com/kubesphere-website/docs/kubesphere-oke-dashboard.png) +- Log in to the console through the external IP address with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard. 
## Enable Pluggable Components (Optional) diff --git a/content/en/docs/installing-on-kubernetes/introduction/overview.md b/content/en/docs/installing-on-kubernetes/introduction/overview.md index d8296ea42..24b3f36e7 100644 --- a/content/en/docs/installing-on-kubernetes/introduction/overview.md +++ b/content/en/docs/installing-on-kubernetes/introduction/overview.md @@ -29,9 +29,9 @@ After you make sure your existing Kubernetes cluster meets all the requirements, 1. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 2. Inspect the logs of installation: diff --git a/content/en/docs/installing-on-kubernetes/introduction/prerequisites.md b/content/en/docs/installing-on-kubernetes/introduction/prerequisites.md index 655152054..856c0a2a7 100644 --- a/content/en/docs/installing-on-kubernetes/introduction/prerequisites.md +++ b/content/en/docs/installing-on-kubernetes/introduction/prerequisites.md @@ -8,7 +8,7 @@ weight: 4120 You can install KubeSphere on virtual machines and bare metal with Kubernetes also provisioned. In addition, KubeSphere can also be deployed on cloud-hosted and on-premises Kubernetes clusters as long as your Kubernetes cluster meets the prerequisites below. -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x, or v1.22.x (experimental). - Available CPU > 1 Core and Memory > 2 G. 
- A **default** StorageClass in your Kubernetes cluster is configured; use `kubectl get sc` to verify it. - The CSR signing feature is activated in kube-apiserver when it is started with the `--cluster-signing-cert-file` and `--cluster-signing-key-file` parameters. See [RKE installation issue](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309). diff --git a/content/en/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md b/content/en/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md index 57da6ee2f..92f52ec0a 100644 --- a/content/en/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md +++ b/content/en/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md @@ -89,7 +89,7 @@ As you install KubeSphere in an air-gapped environment, you need to prepare an i 1. Download the image list file `images-list.txt` from a machine that has access to the Internet through the following command: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} @@ -101,7 +101,7 @@ As you install KubeSphere in an air-gapped environment, you need to prepare an i 2. Download `offline-installation-tool.sh`. ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. Make the `.sh` file executable. @@ -124,7 +124,7 @@ As you install KubeSphere in an air-gapped environment, you need to prepare an i -l IMAGES-LIST : text file with list of images. -r PRIVATE-REGISTRY : target private registry:port. -s : save model will be applied. Pull the images in the IMAGES-LIST and save images as a tar.gz file. 
- -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.17.9 + -v KUBERNETES-VERSION : download kubernetes' binaries. default: v1.21.5 -h : usage message ``` @@ -161,8 +161,8 @@ Similar to installing KubeSphere on an existing Kubernetes cluster in an online 1. Execute the following commands to download these two files and transfer them to your machine that serves as the taskbox for installation. ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` 2. Edit `cluster-configuration.yaml` to add your private image registry. For example, `dockerhub.kubekey.local` is the registry address in this tutorial, then use it as the value of `.spec.local_registry` as below: @@ -242,160 +242,155 @@ To access the console, make sure port 30880 is opened in your security group. 
## Appendix -### Image list of KubeSphere v3.1.1 +### Image list of KubeSphere 3.2.1 ```txt ##k8s-images -kubesphere/kube-apiserver:v1.20.6 -kubesphere/kube-scheduler:v1.20.6 -kubesphere/kube-proxy:v1.20.6 -kubesphere/kube-controller-manager:v1.20.6 -kubesphere/kube-apiserver:v1.19.8 -kubesphere/kube-scheduler:v1.19.8 -kubesphere/kube-proxy:v1.19.8 -kubesphere/kube-controller-manager:v1.19.8 +kubesphere/kube-apiserver:v1.22.1 +kubesphere/kube-controller-manager:v1.22.1 +kubesphere/kube-proxy:v1.22.1 +kubesphere/kube-scheduler:v1.22.1 +kubesphere/kube-apiserver:v1.21.5 +kubesphere/kube-controller-manager:v1.21.5 +kubesphere/kube-proxy:v1.21.5 +kubesphere/kube-scheduler:v1.21.5 +kubesphere/kube-apiserver:v1.20.10 +kubesphere/kube-controller-manager:v1.20.10 +kubesphere/kube-proxy:v1.20.10 +kubesphere/kube-scheduler:v1.20.10 kubesphere/kube-apiserver:v1.19.9 -kubesphere/kube-scheduler:v1.19.9 -kubesphere/kube-proxy:v1.19.9 kubesphere/kube-controller-manager:v1.19.9 -kubesphere/kube-apiserver:v1.18.8 -kubesphere/kube-scheduler:v1.18.8 -kubesphere/kube-proxy:v1.18.8 -kubesphere/kube-controller-manager:v1.18.8 -kubesphere/kube-apiserver:v1.17.9 -kubesphere/kube-scheduler:v1.17.9 -kubesphere/kube-proxy:v1.17.9 -kubesphere/kube-controller-manager:v1.17.9 -kubesphere/pause:3.1 -kubesphere/pause:3.2 -kubesphere/etcd:v3.4.13 -calico/cni:v3.16.3 -calico/kube-controllers:v3.16.3 -calico/node:v3.16.3 -calico/pod2daemon-flexvol:v3.16.3 -calico/typha:v3.16.3 +kubesphere/kube-proxy:v1.19.9 +kubesphere/kube-scheduler:v1.19.9 +kubesphere/pause:3.5 +kubesphere/pause:3.4.1 +coredns/coredns:1.8.0 +calico/cni:v3.20.0 +calico/kube-controllers:v3.20.0 +calico/node:v3.20.0 +calico/pod2daemon-flexvol:v3.20.0 +calico/typha:v3.20.0 kubesphere/flannel:v0.12.0 -coredns/coredns:1.6.9 -kubesphere/k8s-dns-node-cache:1.15.12 openebs/provisioner-localpv:2.10.1 openebs/linux-utils:2.10.0 -kubesphere/nfs-client-provisioner:v3.1.0-k8s1.11 -##csi-images -csiplugin/csi-neonsan:v1.2.0 
-csiplugin/csi-neonsan-ubuntu:v1.2.0 -csiplugin/csi-neonsan-centos:v1.2.0 -csiplugin/csi-provisioner:v1.5.0 -csiplugin/csi-attacher:v2.1.1 -csiplugin/csi-resizer:v0.4.0 -csiplugin/csi-snapshotter:v2.0.1 -csiplugin/csi-node-driver-registrar:v1.2.0 -csiplugin/csi-qingcloud:v1.2.0 +kubesphere/k8s-dns-node-cache:1.15.12 ##kubesphere-images -kubesphere/ks-apiserver:v3.1.1 -kubesphere/ks-console:v3.1.1 -kubesphere/ks-controller-manager:v3.1.1 -kubesphere/ks-installer:v3.1.1 +kubesphere/ks-installer:v3.2.1 +kubesphere/ks-apiserver:v3.2.1 +kubesphere/ks-console:v3.2.1 +kubesphere/ks-controller-manager:v3.2.1 kubesphere/kubectl:v1.20.0 -kubesphere/kubectl:v1.19.0 -redis:5.0.12-alpine -alpine:3.14 -haproxy:2.0.22-alpine -nginx:1.14-alpine +kubesphere/kubefed:v0.8.1 +kubesphere/tower:v0.2.0 minio/minio:RELEASE.2019-08-07T01-59-21Z minio/mc:RELEASE.2019-08-07T23-14-43Z +csiplugin/snapshot-controller:v4.0.0 +kubesphere/nginx-ingress-controller:v0.48.1 mirrorgooglecontainers/defaultbackend-amd64:1.4 -kubesphere/nginx-ingress-controller:v0.35.0 -osixia/openldap:1.3.0 -csiplugin/snapshot-controller:v3.0.3 -kubesphere/kubefed:v0.7.0 -kubesphere/tower:v0.2.0 -kubesphere/prometheus-config-reloader:v0.42.1 -kubesphere/prometheus-operator:v0.42.1 -prom/alertmanager:v0.21.0 -prom/prometheus:v2.26.0 -prom/node-exporter:v0.18.1 -kubesphere/ks-alerting-migration:v3.1.0 -jimmidyson/configmap-reload:v0.3.0 -kubesphere/notification-manager-operator:v1.0.0 -kubesphere/notification-manager:v1.0.0 kubesphere/metrics-server:v0.4.2 +redis:5.0.14-alpine +haproxy:2.0.25-alpine +alpine:3.14 +osixia/openldap:1.3.0 +kubesphere/netshoot:v1.0 +##kubeedge-images +kubeedge/cloudcore:v1.7.2 +kubesphere/edge-watcher:v0.1.1 +kubesphere/edge-watcher-agent:v0.1.0 +##gatekeeper-images +openpolicyagent/gatekeeper:v3.5.2 +##openpitrix-images +kubesphere/openpitrix-jobs:v3.2.1 +##kubesphere-devops-images +kubesphere/devops-apiserver:v3.2.1 +kubesphere/devops-controller:v3.2.1 +kubesphere/devops-tools:v3.2.1 
+kubesphere/ks-jenkins:v3.2.0-2.249.1 +jenkins/jnlp-slave:3.27-1 +kubesphere/builder-base:v3.2.0 +kubesphere/builder-nodejs:v3.2.0 +kubesphere/builder-maven:v3.2.0 +kubesphere/builder-python:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-base:v3.2.0-podman +kubesphere/builder-nodejs:v3.2.0-podman +kubesphere/builder-maven:v3.2.0-podman +kubesphere/builder-python:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/s2ioperator:v3.2.0 +kubesphere/s2irun:v3.2.0 +kubesphere/s2i-binary:v3.2.0 +kubesphere/tomcat85-java11-centos7:v3.2.0 +kubesphere/tomcat85-java11-runtime:v3.2.0 +kubesphere/tomcat85-java8-centos7:v3.2.0 +kubesphere/tomcat85-java8-runtime:v3.2.0 +kubesphere/java-11-centos7:v3.2.0 +kubesphere/java-8-centos7:v3.2.0 +kubesphere/java-8-runtime:v3.2.0 +kubesphere/java-11-runtime:v3.2.0 +kubesphere/nodejs-8-centos7:v3.2.0 +kubesphere/nodejs-6-centos7:v3.2.0 +kubesphere/nodejs-4-centos7:v3.2.0 +kubesphere/python-36-centos7:v3.2.0 +kubesphere/python-35-centos7:v3.2.0 +kubesphere/python-34-centos7:v3.2.0 +kubesphere/python-27-centos7:v3.2.0 +##kubesphere-monitoring-images +jimmidyson/configmap-reload:v0.3.0 +prom/prometheus:v2.26.0 +kubesphere/prometheus-config-reloader:v0.43.2 +kubesphere/prometheus-operator:v0.43.2 kubesphere/kube-rbac-proxy:v0.8.0 kubesphere/kube-state-metrics:v1.9.7 -openebs/provisioner-localpv:2.3.0 +prom/node-exporter:v0.18.1 +kubesphere/k8s-prometheus-adapter-amd64:v0.6.0 +prom/alertmanager:v0.21.0 thanosio/thanos:v0.18.0 grafana/grafana:7.4.3 +kubesphere/kube-rbac-proxy:v0.8.0 +kubesphere/notification-manager-operator:v1.4.0 +kubesphere/notification-manager:v1.4.0 +kubesphere/notification-tenant-sidecar:v3.2.0 ##kubesphere-logging-images -kubesphere/elasticsearch-oss:6.7.0-1 kubesphere/elasticsearch-curator:v5.7.6 -kubesphere/fluentbit-operator:v0.5.0 -kubesphere/fluentbit-operator:migrator -kubesphere/fluent-bit:v1.6.9 -elastic/filebeat:6.7.0 
-kubesphere/kube-auditing-operator:v0.1.2 -kubesphere/kube-auditing-webhook:v0.1.2 -kubesphere/kube-events-exporter:v0.1.0 -kubesphere/kube-events-operator:v0.1.0 -kubesphere/kube-events-ruler:v0.2.0 -kubesphere/log-sidecar-injector:1.1 +kubesphere/elasticsearch-oss:6.7.0-1 +kubesphere/fluentbit-operator:v0.11.0 docker:19.03 +kubesphere/fluent-bit:v1.8.3 +kubesphere/log-sidecar-injector:1.1 +elastic/filebeat:6.7.0 +kubesphere/kube-events-operator:v0.3.0 +kubesphere/kube-events-exporter:v0.3.0 +kubesphere/kube-events-ruler:v0.3.0 +kubesphere/kube-auditing-operator:v0.2.0 +kubesphere/kube-auditing-webhook:v0.2.0 ##istio-images -istio/pilot:1.6.10 -istio/proxyv2:1.6.10 -jaegertracing/jaeger-agent:1.17 -jaegertracing/jaeger-collector:1.17 -jaegertracing/jaeger-es-index-cleaner:1.17 -jaegertracing/jaeger-operator:1.17.1 -jaegertracing/jaeger-query:1.17 -kubesphere/kiali:v1.26.1 -kubesphere/kiali-operator:v1.26.1 -##kubesphere-devops-images -kubesphere/ks-jenkins:2.249.1 -jenkins/jnlp-slave:3.27-1 -kubesphere/s2ioperator:v3.1.0 -kubesphere/s2irun:v2.1.1 -kubesphere/builder-base:v3.1.0 -kubesphere/builder-nodejs:v3.1.0 -kubesphere/builder-maven:v3.1.0 -kubesphere/builder-go:v3.1.0 -kubesphere/s2i-binary:v2.1.0 -kubesphere/tomcat85-java11-centos7:v2.1.0 -kubesphere/tomcat85-java11-runtime:v2.1.0 -kubesphere/tomcat85-java8-centos7:v2.1.0 -kubesphere/tomcat85-java8-runtime:v2.1.0 -kubesphere/java-11-centos7:v2.1.0 -kubesphere/java-8-centos7:v2.1.0 -kubesphere/java-8-runtime:v2.1.0 -kubesphere/java-11-runtime:v2.1.0 -kubesphere/nodejs-8-centos7:v2.1.0 -kubesphere/nodejs-6-centos7:v2.1.0 -kubesphere/nodejs-4-centos7:v2.1.0 -kubesphere/python-36-centos7:v2.1.0 -kubesphere/python-35-centos7:v2.1.0 -kubesphere/python-34-centos7:v2.1.0 -kubesphere/python-27-centos7:v2.1.0 -##openpitrix-images -kubespheredev/openpitrix-jobs:v3.1.1 -##weave-scope-images -weaveworks/scope:1.13.0 -##kubeedge-images -kubeedge/cloudcore:v1.6.2 -kubesphere/edge-watcher:v0.1.0 
-kubesphere/kube-rbac-proxy:v0.5.0 -kubesphere/edge-watcher-agent:v0.1.0 -##example-images-images -kubesphere/examples-bookinfo-productpage-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v2:1.16.2 -kubesphere/examples-bookinfo-reviews-v3:1.16.2 -kubesphere/examples-bookinfo-details-v1:1.16.2 -kubesphere/examples-bookinfo-ratings-v1:1.16.3 +istio/pilot:1.11.1 +istio/proxyv2:1.11.1 +jaegertracing/jaeger-operator:1.27 +jaegertracing/jaeger-agent:1.27 +jaegertracing/jaeger-collector:1.27 +jaegertracing/jaeger-query:1.27 +jaegertracing/jaeger-es-index-cleaner:1.27 +kubesphere/kiali-operator:v1.38.1 +kubesphere/kiali:v1.38 +##example-images busybox:1.31.1 +nginx:1.14-alpine joosthofman/wget:1.0 -kubesphere/netshoot:v1.0 nginxdemos/hello:plain-text wordpress:4.8-apache mirrorgooglecontainers/hpa-example:latest java:openjdk-8-jre-alpine fluent/fluentd:v1.4.2-2.0 perl:latest +kubesphere/examples-bookinfo-productpage-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v2:1.16.2 +kubesphere/examples-bookinfo-details-v1:1.16.2 +kubesphere/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +weaveworks/scope:1.13.0 ``` diff --git a/content/en/docs/installing-on-linux/cluster-operation/add-edge-nodes.md b/content/en/docs/installing-on-linux/cluster-operation/add-edge-nodes.md index 1e96adc6e..efee63a04 100644 --- a/content/en/docs/installing-on-linux/cluster-operation/add-edge-nodes.md +++ b/content/en/docs/installing-on-linux/cluster-operation/add-edge-nodes.md @@ -90,9 +90,9 @@ To make sure edge nodes can successfully talk to your cluster, you must forward ## Add an Edge Node -1. Log in to the console as `admin` and click **Platform** in the top-left corner. +1. Log in to the console as `admin` and click **Platform** in the upper-left corner. -2. Select **Cluster Management** and navigate to **Edge Nodes** under **Node Management**. +2. 
Select **Cluster Management** and navigate to **Edge Nodes** under **Nodes**. {{< notice note >}} @@ -100,7 +100,7 @@ To make sure edge nodes can successfully talk to your cluster, you must forward {{}} -3. Click **Add Node**. In the dialog that appears, set a node name and enter an internal IP address of your edge node. Click **Validate** to continue. +3. Click **Add**. In the dialog that appears, set a node name and enter an internal IP address of your edge node. Click **Validate** to continue. {{< notice note >}} @@ -109,7 +109,7 @@ To make sure edge nodes can successfully talk to your cluster, you must forward {{}} -4. Copy the command automatically created under **Add Command** and run it on your edge node. +4. Copy the command automatically created under **Edge Node Configuration Command** and run it on your edge node. {{< notice note >}} @@ -117,19 +117,15 @@ To make sure edge nodes can successfully talk to your cluster, you must forward {{}} - ![edge-node-dialog](/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-dialog.png) - 5. Close the dialog, refresh the page, and the edge node will appear in the list. - ![edge-node-added](/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-added.png) - {{< notice note >}} After an edge node is added, if you cannot see CPU and memory resource usage on the **Edge Nodes** page, make sure [Metrics Server](../../../pluggable-components/metrics-server/) 0.4.1 or later is installed in your cluster. {{}} -6. After an edge node joins your cluster, some Pods may be scheduled to it while they remains in the `Pending` state on the edge node. Due to the tolerations some DaemonSets (for example, Calico) have, in the current version (KubeSphere v3.1.1), you need to manually patch some Pods so that they will not be schedule to the edge node. +6. After an edge node joins your cluster, some Pods may be scheduled to it while they remains in the `Pending` state on the edge node. 
Due to the tolerations some DaemonSets (for example, Calico) have, in the current version (KubeSphere 3.2.1), you need to manually patch some Pods so that they will not be schedule to the edge node. ```bash #!/bin/bash diff --git a/content/en/docs/installing-on-linux/cluster-operation/add-new-nodes.md b/content/en/docs/installing-on-linux/cluster-operation/add-new-nodes.md index 866aa2f5d..30bdf0ad8 100644 --- a/content/en/docs/installing-on-linux/cluster-operation/add-new-nodes.md +++ b/content/en/docs/installing-on-linux/cluster-operation/add-new-nodes.md @@ -1,12 +1,12 @@ --- -title: "Add New Nodes" +title: "Add New Nodes to a Kubernetes Cluster" keywords: 'Kubernetes, KubeSphere, scale-out, add-nodes' description: 'Add more nodes to scale out your cluster.' linkTitle: "Add New Nodes" weight: 3610 --- -After you use KubeSphere for a certain period of time, it is likely that you need to scale out your cluster with an increasing number of workloads. From KubeSphere v3.0.0, you can use the brand-new installer [KubeKey](https://github.com/kubesphere/kubekey) to add new nodes to a cluster. Fundamentally, the operation is based on Kubelet's registration mechanism. In other words, the new nodes will automatically join the existing Kubernetes cluster. KubeSphere supports hybrid environments, which means the newly-added host OS can be CentOS or Ubuntu. +After you use KubeSphere for a certain period of time, it is likely that you need to scale out your cluster with an increasing number of workloads. From KubeSphere v3.0.0, you can use the brand-new installer [KubeKey](https://github.com/kubesphere/kubekey) to add new nodes to a Kubernetes cluster. Fundamentally, the operation is based on Kubelet's registration mechanism. In other words, the new nodes will automatically join the existing Kubernetes cluster. KubeSphere supports hybrid environments, which means the newly-added host OS can be CentOS or Ubuntu. 
This tutorial demonstrates how to add new nodes to a single-node cluster. To scale out a multi-node cluster, the steps are basically the same. @@ -16,7 +16,7 @@ This tutorial demonstrates how to add new nodes to a single-node cluster. To sca - You have [downloaded KubeKey](../../../installing-on-linux/introduction/multioverview/#step-2-download-kubekey). -## Add Worker Nodes +## Add Worker Nodes to Kubernetes 1. Retrieve your cluster information using KubeKey. The command below creates a configuration file (`sample.yaml`). @@ -74,7 +74,7 @@ You can skip this step if you already have the configuration file on your machin node2 Ready worker 31h v1.17.9 ``` -## Add Master Nodes for High Availability +## Add New Master Nodes for High Availability The steps of adding master nodes are generally the same as adding worker nodes while you need to configure a load balancer for your cluster. You can use any cloud load balancers or hardware load balancers (for example, F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating highly available clusters. diff --git a/content/en/docs/installing-on-linux/cluster-operation/remove-nodes.md b/content/en/docs/installing-on-linux/cluster-operation/remove-nodes.md index 7e2ceb584..b7ec0504b 100644 --- a/content/en/docs/installing-on-linux/cluster-operation/remove-nodes.md +++ b/content/en/docs/installing-on-linux/cluster-operation/remove-nodes.md @@ -10,9 +10,7 @@ weight: 3620 Marking a node as unschedulable prevents the scheduler from placing new Pods onto that node while not affecting existing Pods on the node. This is useful as a preparatory step before a node reboot or other maintenance. -Log in to the console as `admin` and go to the **Cluster Management** page. To mark a node unschedulable, choose **Cluster Nodes** under **Node Management** from the left menu, find a node you want to remove from the cluster, and click **Cordon**. 
Alternatively, you can run the command `kubectl cordon $NODENAME` directly. See [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) for more details. - -![cordon](/images/docs/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png) +Log in to the console as `admin` and go to the **Cluster Management** page. To mark a node unschedulable, choose **Cluster Nodes** under **Nodes** from the left menu, find a node you want to remove from the cluster, and click **Cordon**. Alternatively, you can run the command `kubectl cordon $NODENAME` directly. See [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) for more details. {{< notice note >}} diff --git a/content/en/docs/installing-on-linux/high-availability-configurations/ha-configuration.md b/content/en/docs/installing-on-linux/high-availability-configurations/ha-configuration.md index 2efb61d4a..e8a9543d4 100644 --- a/content/en/docs/installing-on-linux/high-availability-configurations/ha-configuration.md +++ b/content/en/docs/installing-on-linux/high-availability-configurations/ha-configuration.md @@ -3,7 +3,7 @@ title: "Set up an HA Cluster Using a Load Balancer" keywords: 'KubeSphere, Kubernetes, HA, high availability, installation, configuration' description: 'Learn how to create a highly available cluster using a load balancer.' linkTitle: "Set up an HA Cluster Using a Load Balancer" -weight: 3210 +weight: 3220 --- You can set up a single-master Kubernetes cluster with KubeSphere installed based on the tutorial of [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/). Single-master clusters may be sufficient for development and testing in most cases. For a production environment, however, you need to consider the high availability of the cluster. 
If key components (for example, kube-apiserver, kube-scheduler, and kube-controller-manager) are all running on the same master node, Kubernetes and KubeSphere will be unavailable once the master node goes down. Therefore, you need to set up a high-availability cluster by provisioning load balancers with multiple master nodes. You can use any cloud load balancer, or any hardware load balancer (for example, F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating high-availability clusters. @@ -48,7 +48,7 @@ You must create a load balancer in your environment to listen (also known as lis Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -64,7 +64,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -79,7 +79,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -89,15 +89,15 @@ Make `kk` executable: chmod +x kk ``` -Create an example configuration file with default configurations. Here Kubernetes v1.20.4 is used as an example. +Create an example configuration file with default configurations. Here Kubernetes v1.21.5 is used as an example. 
```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -145,12 +145,14 @@ For more information about different fields in this configuration file, see [Kub ### Configure the load balancer ```yaml -## Public LB config example -## apiserver_loadbalancer_domain_name: "lb.kubesphere.local" +spec: controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + domain: lb.kubesphere.local address: "192.168.0.xx" - port: "6443" + port: 6443 ``` {{< notice note >}} @@ -158,6 +160,7 @@ For more information about different fields in this configuration file, see [Kub - The address and port should be indented by two spaces in `config-sample.yaml`. - In most cases, you need to provide the **private IP address** of the load balancer for the field `address`. 
However, different cloud providers may have different configurations for load balancers. For example, if you configure a Server Load Balancer (SLB) on Alibaba Cloud, the platform assigns a public IP address to the SLB, which means you need to specify the public IP address for the field `address`. - The domain name of the load balancer is `lb.kubesphere.local` by default for internal access. +- To use an internal load balancer, uncomment the field `internalLoadbalancer`. {{}} diff --git a/content/en/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md b/content/en/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md new file mode 100644 index 000000000..f51621690 --- /dev/null +++ b/content/en/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md @@ -0,0 +1,192 @@ +--- +title: "Set Up an HA Cluster Using the Internal HAProxy of KubeKey" +keywords: 'KubeSphere, Kubernetes, KubeKey, HA, Installation' +description: 'Learn how to create a highly available cluster using the internal HAProxy of KubeKey.' +linkTitle: "Set Up an HA Cluster Using the Internal HAProxy of KubeKey" +weight: 3210 +--- + +[KubeKey](https://github.com/kubesphere/kubekey) is an easy-to-use tool for creating Kubernetes clusters. Starting from v1.2.1, KubeKey provides a built-in high availability mode to simplify the creation of highly available Kubernetes clusters. The high availability mode that KubeKey implements is called local load balancing mode. KubeKey deploys a load balancer (HAProxy) on each worker node, and the Kubernetes components on all control planes connect to their local kube-apiserver. The Kubernetes components on each worker node, on the other hand, connect to the kube-apiserver of multiple control planes through a reverse proxy, namely the load balancer deployed by KubeKey. 
Although this mode is less efficient than a dedicated load balancer because additional health check mechanisms are introduced, it brings a more practical, efficient, and convenient high availability deployment mode when the current environment cannot provide an external load balancer or virtual IP (VIP).
+
+This document describes how to use the built-in high availability mode when installing KubeSphere on Linux.
+
+## Architecture
+
+Make sure you have prepared six Linux machines before you begin, three of which serve as control planes and the other three as worker nodes. The following figure shows the architecture of the built-in high availability mode. For more information about system and network requirements, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts).
+
+![HA architecture](/images/docs/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png)
+
+## Download KubeKey
+
+Refer to the following steps to download KubeKey.
+
+{{< tabs >}}
+
+{{< tab "Good network connections to GitHub/Googleapis" >}}
+
+Download KubeKey from [its GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command.
+
+```bash
+curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh -
+```
+
+{{}}
+
+{{< tab "Poor network connections to GitHub/Googleapis" >}}
+
+Run the following command first to make sure that you download KubeKey from the correct zone.
+
+```bash
+export KKZONE=cn
+```
+
+Run the following command to download KubeKey:
+
+```bash
+curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh -
+```
+
+{{< notice note >}}
+
+After you download KubeKey, if you transfer it to a new machine also with poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the following steps.
+
+{{}}
+
+{{}}
+
+{{}}
+
+{{< notice note >}}
+
+The preceding commands download the latest release of KubeKey (v1.2.1).
You can modify the version number in the command to download a specific version. + +{{}} + +Make `kk` executable: + +```bash +chmod +x kk +``` + +Create an example configuration file with default configurations. Here Kubernetes v1.21.5 is used as an example. + +```bash +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 +``` + +{{< notice note >}} + +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. +- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. + +{{}} + +## Deploy KubeSphere and Kubernetes + +After you run the preceding commands, a configuration file `config-sample.yaml` is created. Edit the file to add machine information, configure the load balancer and more. + +{{< notice note >}} + +The file name may be different if you customize it. 
+ +{{}} + +### config-sample.yaml example + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + master: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +For more information about different fields in this configuration file, see [Kubernetes Cluster Configurations](../../../installing-on-linux/introduction/vars/) and [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file). + +### Enable the built-in high availability mode + +```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +{{< notice note >}} + +- To enable the built-in high availability mode, uncomment the field `internalLoadbalancer`. +- The fields `address` and `port` in `config-sample.yaml` must be indented by two spaces against `controlPlaneEndpoint`. +- The default internal access domain name for the load balancer is `lb.kubesphere.local`. + +{{}} + +### Persistent storage plugin configurations + +For a production environment, you need to prepare persistent storage and configure the storage plugin (for example, CSI) in `config-sample.yaml` to define which storage service you want to use. 
For more information, see [Persistent Storage Configurations](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). + +### (Optional) Enable pluggable components + +KubeSphere has decoupled some core feature components since v2.1.0. These components are designed to be pluggable which means you can enable them either before or after installation. By default, KubeSphere is installed with the minimal package if you do not enable them. + +You can enable any of them according to your demands. It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere. Make sure your machines have sufficient CPU and memory before enabling them. See [Enable Pluggable Components](../../../pluggable-components/) for details. + +### Start installation + +After you complete the configuration, run the following command to start installation: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### Verify installation + +1. Run the following command to inspect the logs of installation. + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. When you see the following message, it means your HA cluster is successfully created. + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+ + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/en/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md b/content/en/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md index 91fc4d601..0c7c6f883 100644 --- a/content/en/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md +++ b/content/en/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md @@ -1,9 +1,10 @@ --- -title: "Set up an HA Cluster Using Keepalived and HAproxy" -keywords: 'KubeSphere, Kubernetes, HA, high availability, installation, configuration, Keepalived, HAproxy' +title: "Set up an HA Kubernetes Cluster Using Keepalived and HAproxy" +keywords: 'Kubernetes, KubeSphere, HA, high availability, installation, configuration, Keepalived, HAproxy' description: 'Learn how to create a highly available cluster using Keepalived and HAproxy.' linkTitle: "Set up an HA Cluster Using Keepalived and HAproxy" -weight: 3220 +weight: 3230 +showSubscribe: true --- A highly available Kubernetes cluster ensures your applications run without outages which is required for production. In this connection, there are plenty of ways for you to choose from to achieve high availability. @@ -50,7 +51,7 @@ Run the following command to install Keepalived and HAproxy first. yum install keepalived haproxy psmisc -y ``` -### HAproxy +### HAproxy Configuration 1. The configuration of HAproxy is exactly the same on the two machines for load balancing. Run the following command to configure HAproxy. @@ -111,7 +112,7 @@ yum install keepalived haproxy psmisc -y 5. Make sure you configure HAproxy on the other machine (`lb2`) as well. 
-### Keepalived +### Keepalived Configuration Keepalived must be installed on both machines while the configuration of them is slightly different. @@ -267,7 +268,7 @@ Before you start to create your Kubernetes cluster, make sure you have tested th Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -283,7 +284,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -298,7 +299,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -308,15 +309,15 @@ Make `kk` executable: chmod +x kk ``` -Create an example configuration file with default configurations. Here Kubernetes v1.20.4 is used as an example. +Create an example configuration file with default configurations. Here Kubernetes v1.21.5 is used as an example. ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). 
+- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. diff --git a/content/en/docs/installing-on-linux/introduction/air-gapped-installation.md b/content/en/docs/installing-on-linux/introduction/air-gapped-installation.md index afcca0362..17414d780 100644 --- a/content/en/docs/installing-on-linux/introduction/air-gapped-installation.md +++ b/content/en/docs/installing-on-linux/introduction/air-gapped-installation.md @@ -149,7 +149,7 @@ docker run -d \ ## Step 3: Download KubeKey -Similar to installing KubeSphere on Linux in an online environment, you also need to [download KubeKey v1.1.1](https://github.com/kubesphere/kubekey/releases) first. Download the `tar.gz` file, and transfer it to your local machine which serves as the taskbox for installation. After you uncompress the file, execute the following command to make `kk` executable: +Similar to installing KubeSphere on Linux in an online environment, you also need to [download KubeKey v1.2.1](https://github.com/kubesphere/kubekey/releases) first. Download the `tar.gz` file, and transfer it to your local machine which serves as the taskbox for installation. 
After you uncompress the file, execute the following command to make `kk` executable: ```bash chmod +x kk @@ -162,7 +162,7 @@ As you install KubeSphere and Kubernetes on Linux, you need to prepare an image 1. Download the image list file `images-list.txt` from a machine that has access to the Internet through the following command: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} @@ -174,7 +174,7 @@ As you install KubeSphere and Kubernetes on Linux, you need to prepare an image 2. Download `offline-installation-tool.sh`. ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. Make the `.sh` file executable. @@ -204,18 +204,18 @@ As you install KubeSphere and Kubernetes on Linux, you need to prepare an image 5. Download the Kubernetes binary file. ```bash - ./offline-installation-tool.sh -b -v v1.17.9 + ./offline-installation-tool.sh -b -v v1.21.5 ``` If you cannot access the object storage service of Google, run the following command instead to add the environment variable to change the source. ```bash - export KKZONE=cn;./offline-installation-tool.sh -b -v v1.17.9 + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.21.5 ``` {{< notice note >}} - - You can change the Kubernetes version downloaded based on your needs. Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../kubekey/#support-matrix). + - You can change the Kubernetes version downloaded based on your needs. 
Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../kubekey/#support-matrix). - After you run the script, a folder `kubekey` is automatically created. Note that this file and `kk` must be placed in the same directory when you create the cluster later. @@ -262,7 +262,7 @@ Execute the following command to generate an example configuration file for inst For example: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -307,7 +307,7 @@ spec: address: "" port: 6443 kubernetes: - version: v1.17.9 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -328,7 +328,7 @@ metadata: name: ks-installer namespace: kubesphere-system labels: - version: v3.1.1 + version: v3.2.1 spec: persistence: storageClass: "" @@ -360,7 +360,7 @@ spec: enabled: false username: "" password: "" - externalElasticsearchUrl: "" + externalElasticsearchHost: "" externalElasticsearchPort: "" console: enableMultiLogin: true @@ -499,162 +499,157 @@ To access the console, make sure port 30880 is opened in your security group. 
## Appendix -### Image list of KubeSphere v3.1.1 +### Image list of KubeSphere 3.2.1 ```txt ##k8s-images -kubesphere/kube-apiserver:v1.20.6 -kubesphere/kube-scheduler:v1.20.6 -kubesphere/kube-proxy:v1.20.6 -kubesphere/kube-controller-manager:v1.20.6 -kubesphere/kube-apiserver:v1.19.8 -kubesphere/kube-scheduler:v1.19.8 -kubesphere/kube-proxy:v1.19.8 -kubesphere/kube-controller-manager:v1.19.8 +kubesphere/kube-apiserver:v1.22.1 +kubesphere/kube-controller-manager:v1.22.1 +kubesphere/kube-proxy:v1.22.1 +kubesphere/kube-scheduler:v1.22.1 +kubesphere/kube-apiserver:v1.21.5 +kubesphere/kube-controller-manager:v1.21.5 +kubesphere/kube-proxy:v1.21.5 +kubesphere/kube-scheduler:v1.21.5 +kubesphere/kube-apiserver:v1.20.10 +kubesphere/kube-controller-manager:v1.20.10 +kubesphere/kube-proxy:v1.20.10 +kubesphere/kube-scheduler:v1.20.10 kubesphere/kube-apiserver:v1.19.9 -kubesphere/kube-scheduler:v1.19.9 -kubesphere/kube-proxy:v1.19.9 kubesphere/kube-controller-manager:v1.19.9 -kubesphere/kube-apiserver:v1.18.8 -kubesphere/kube-scheduler:v1.18.8 -kubesphere/kube-proxy:v1.18.8 -kubesphere/kube-controller-manager:v1.18.8 -kubesphere/kube-apiserver:v1.17.9 -kubesphere/kube-scheduler:v1.17.9 -kubesphere/kube-proxy:v1.17.9 -kubesphere/kube-controller-manager:v1.17.9 -kubesphere/pause:3.1 -kubesphere/pause:3.2 -kubesphere/etcd:v3.4.13 -calico/cni:v3.16.3 -calico/kube-controllers:v3.16.3 -calico/node:v3.16.3 -calico/pod2daemon-flexvol:v3.16.3 -calico/typha:v3.16.3 +kubesphere/kube-proxy:v1.19.9 +kubesphere/kube-scheduler:v1.19.9 +kubesphere/pause:3.5 +kubesphere/pause:3.4.1 +coredns/coredns:1.8.0 +calico/cni:v3.20.0 +calico/kube-controllers:v3.20.0 +calico/node:v3.20.0 +calico/pod2daemon-flexvol:v3.20.0 +calico/typha:v3.20.0 kubesphere/flannel:v0.12.0 -coredns/coredns:1.6.9 -kubesphere/k8s-dns-node-cache:1.15.12 openebs/provisioner-localpv:2.10.1 openebs/linux-utils:2.10.0 -kubesphere/nfs-client-provisioner:v3.1.0-k8s1.11 -##csi-images -csiplugin/csi-neonsan:v1.2.0 
-csiplugin/csi-neonsan-ubuntu:v1.2.0 -csiplugin/csi-neonsan-centos:v1.2.0 -csiplugin/csi-provisioner:v1.5.0 -csiplugin/csi-attacher:v2.1.1 -csiplugin/csi-resizer:v0.4.0 -csiplugin/csi-snapshotter:v2.0.1 -csiplugin/csi-node-driver-registrar:v1.2.0 -csiplugin/csi-qingcloud:v1.2.0 +kubesphere/k8s-dns-node-cache:1.15.12 ##kubesphere-images -kubesphere/ks-apiserver:v3.1.1 -kubesphere/ks-console:v3.1.1 -kubesphere/ks-controller-manager:v3.1.1 -kubesphere/ks-installer:v3.1.1 +kubesphere/ks-installer:v3.2.1 +kubesphere/ks-apiserver:v3.2.1 +kubesphere/ks-console:v3.2.1 +kubesphere/ks-controller-manager:v3.2.1 kubesphere/kubectl:v1.20.0 -kubesphere/kubectl:v1.19.0 -redis:5.0.12-alpine -alpine:3.14 -haproxy:2.0.22-alpine -nginx:1.14-alpine +kubesphere/kubefed:v0.8.1 +kubesphere/tower:v0.2.0 minio/minio:RELEASE.2019-08-07T01-59-21Z minio/mc:RELEASE.2019-08-07T23-14-43Z +csiplugin/snapshot-controller:v4.0.0 +kubesphere/nginx-ingress-controller:v0.48.1 mirrorgooglecontainers/defaultbackend-amd64:1.4 -kubesphere/nginx-ingress-controller:v0.35.0 -osixia/openldap:1.3.0 -csiplugin/snapshot-controller:v3.0.3 -kubesphere/kubefed:v0.7.0 -kubesphere/tower:v0.2.0 -kubesphere/prometheus-config-reloader:v0.42.1 -kubesphere/prometheus-operator:v0.42.1 -prom/alertmanager:v0.21.0 -prom/prometheus:v2.26.0 -prom/node-exporter:v0.18.1 -kubesphere/ks-alerting-migration:v3.1.0 -jimmidyson/configmap-reload:v0.3.0 -kubesphere/notification-manager-operator:v1.0.0 -kubesphere/notification-manager:v1.0.0 kubesphere/metrics-server:v0.4.2 +redis:5.0.14-alpine +haproxy:2.0.25-alpine +alpine:3.14 +osixia/openldap:1.3.0 +kubesphere/netshoot:v1.0 +##kubeedge-images +kubeedge/cloudcore:v1.7.2 +kubesphere/edge-watcher:v0.1.1 +kubesphere/edge-watcher-agent:v0.1.0 +##gatekeeper-images +openpolicyagent/gatekeeper:v3.5.2 +##openpitrix-images +kubesphere/openpitrix-jobs:v3.2.1 +##kubesphere-devops-images +kubesphere/devops-apiserver:v3.2.1 +kubesphere/devops-controller:v3.2.1 +kubesphere/devops-tools:v3.2.1 
+kubesphere/ks-jenkins:v3.2.0-2.249.1 +jenkins/jnlp-slave:3.27-1 +kubesphere/builder-base:v3.2.0 +kubesphere/builder-nodejs:v3.2.0 +kubesphere/builder-maven:v3.2.0 +kubesphere/builder-python:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-base:v3.2.0-podman +kubesphere/builder-nodejs:v3.2.0-podman +kubesphere/builder-maven:v3.2.0-podman +kubesphere/builder-python:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/s2ioperator:v3.2.0 +kubesphere/s2irun:v3.2.0 +kubesphere/s2i-binary:v3.2.0 +kubesphere/tomcat85-java11-centos7:v3.2.0 +kubesphere/tomcat85-java11-runtime:v3.2.0 +kubesphere/tomcat85-java8-centos7:v3.2.0 +kubesphere/tomcat85-java8-runtime:v3.2.0 +kubesphere/java-11-centos7:v3.2.0 +kubesphere/java-8-centos7:v3.2.0 +kubesphere/java-8-runtime:v3.2.0 +kubesphere/java-11-runtime:v3.2.0 +kubesphere/nodejs-8-centos7:v3.2.0 +kubesphere/nodejs-6-centos7:v3.2.0 +kubesphere/nodejs-4-centos7:v3.2.0 +kubesphere/python-36-centos7:v3.2.0 +kubesphere/python-35-centos7:v3.2.0 +kubesphere/python-34-centos7:v3.2.0 +kubesphere/python-27-centos7:v3.2.0 +##kubesphere-monitoring-images +jimmidyson/configmap-reload:v0.3.0 +prom/prometheus:v2.26.0 +kubesphere/prometheus-config-reloader:v0.43.2 +kubesphere/prometheus-operator:v0.43.2 kubesphere/kube-rbac-proxy:v0.8.0 kubesphere/kube-state-metrics:v1.9.7 -openebs/provisioner-localpv:2.3.0 +prom/node-exporter:v0.18.1 +kubesphere/k8s-prometheus-adapter-amd64:v0.6.0 +prom/alertmanager:v0.21.0 thanosio/thanos:v0.18.0 grafana/grafana:7.4.3 +kubesphere/kube-rbac-proxy:v0.8.0 +kubesphere/notification-manager-operator:v1.4.0 +kubesphere/notification-manager:v1.4.0 +kubesphere/notification-tenant-sidecar:v3.2.0 ##kubesphere-logging-images -kubesphere/elasticsearch-oss:6.7.0-1 kubesphere/elasticsearch-curator:v5.7.6 -kubesphere/fluentbit-operator:v0.5.0 -kubesphere/fluentbit-operator:migrator -kubesphere/fluent-bit:v1.6.9 -elastic/filebeat:6.7.0 
-kubesphere/kube-auditing-operator:v0.1.2 -kubesphere/kube-auditing-webhook:v0.1.2 -kubesphere/kube-events-exporter:v0.1.0 -kubesphere/kube-events-operator:v0.1.0 -kubesphere/kube-events-ruler:v0.2.0 -kubesphere/log-sidecar-injector:1.1 +kubesphere/elasticsearch-oss:6.7.0-1 +kubesphere/fluentbit-operator:v0.11.0 docker:19.03 +kubesphere/fluent-bit:v1.8.3 +kubesphere/log-sidecar-injector:1.1 +elastic/filebeat:6.7.0 +kubesphere/kube-events-operator:v0.3.0 +kubesphere/kube-events-exporter:v0.3.0 +kubesphere/kube-events-ruler:v0.3.0 +kubesphere/kube-auditing-operator:v0.2.0 +kubesphere/kube-auditing-webhook:v0.2.0 ##istio-images -istio/pilot:1.6.10 -istio/proxyv2:1.6.10 -jaegertracing/jaeger-agent:1.17 -jaegertracing/jaeger-collector:1.17 -jaegertracing/jaeger-es-index-cleaner:1.17 -jaegertracing/jaeger-operator:1.17.1 -jaegertracing/jaeger-query:1.17 -kubesphere/kiali:v1.26.1 -kubesphere/kiali-operator:v1.26.1 -##kubesphere-devops-images -kubesphere/ks-jenkins:2.249.1 -jenkins/jnlp-slave:3.27-1 -kubesphere/s2ioperator:v3.1.0 -kubesphere/s2irun:v2.1.1 -kubesphere/builder-base:v3.1.0 -kubesphere/builder-nodejs:v3.1.0 -kubesphere/builder-maven:v3.1.0 -kubesphere/builder-go:v3.1.0 -kubesphere/s2i-binary:v2.1.0 -kubesphere/tomcat85-java11-centos7:v2.1.0 -kubesphere/tomcat85-java11-runtime:v2.1.0 -kubesphere/tomcat85-java8-centos7:v2.1.0 -kubesphere/tomcat85-java8-runtime:v2.1.0 -kubesphere/java-11-centos7:v2.1.0 -kubesphere/java-8-centos7:v2.1.0 -kubesphere/java-8-runtime:v2.1.0 -kubesphere/java-11-runtime:v2.1.0 -kubesphere/nodejs-8-centos7:v2.1.0 -kubesphere/nodejs-6-centos7:v2.1.0 -kubesphere/nodejs-4-centos7:v2.1.0 -kubesphere/python-36-centos7:v2.1.0 -kubesphere/python-35-centos7:v2.1.0 -kubesphere/python-34-centos7:v2.1.0 -kubesphere/python-27-centos7:v2.1.0 -##openpitrix-images -kubespheredev/openpitrix-jobs:v3.1.1 -##weave-scope-images -weaveworks/scope:1.13.0 -##kubeedge-images -kubeedge/cloudcore:v1.6.2 -kubesphere/edge-watcher:v0.1.0 
-kubesphere/kube-rbac-proxy:v0.5.0 -kubesphere/edge-watcher-agent:v0.1.0 -##example-images-images -kubesphere/examples-bookinfo-productpage-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v2:1.16.2 -kubesphere/examples-bookinfo-reviews-v3:1.16.2 -kubesphere/examples-bookinfo-details-v1:1.16.2 -kubesphere/examples-bookinfo-ratings-v1:1.16.3 +istio/pilot:1.11.1 +istio/proxyv2:1.11.1 +jaegertracing/jaeger-operator:1.27 +jaegertracing/jaeger-agent:1.27 +jaegertracing/jaeger-collector:1.27 +jaegertracing/jaeger-query:1.27 +jaegertracing/jaeger-es-index-cleaner:1.27 +kubesphere/kiali-operator:v1.38.1 +kubesphere/kiali:v1.38 +##example-images busybox:1.31.1 +nginx:1.14-alpine joosthofman/wget:1.0 -kubesphere/netshoot:v1.0 nginxdemos/hello:plain-text wordpress:4.8-apache mirrorgooglecontainers/hpa-example:latest java:openjdk-8-jre-alpine fluent/fluentd:v1.4.2-2.0 perl:latest +kubesphere/examples-bookinfo-productpage-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v2:1.16.2 +kubesphere/examples-bookinfo-details-v1:1.16.2 +kubesphere/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +weaveworks/scope:1.13.0 ``` diff --git a/content/en/docs/installing-on-linux/introduction/kubekey.md b/content/en/docs/installing-on-linux/introduction/kubekey.md index 87a6325c8..412eb3bf4 100644 --- a/content/en/docs/installing-on-linux/introduction/kubekey.md +++ b/content/en/docs/installing-on-linux/introduction/kubekey.md @@ -18,7 +18,7 @@ There are several scenarios to use KubeKey: ## How Does KubeKey Work -After you download KubeKey, you use an executable called `kk` to perform different operations. No matter you use it to create, scale or upgrade a cluster, you must prepare a configuration file using `kk` beforehand. 
This configuration file contains basic parameters of your cluster, such as host information, network configurations (CNI plugin and Pod and Service CIDR), registry mirrors, add-ons (YAML or Chart) and pluggable component options (if you install KubeSphere). For more information, see [an example configuration file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +After you download KubeKey, you use an executable called `kk` to perform different operations. No matter you use it to create, scale or upgrade a cluster, you must prepare a configuration file using `kk` beforehand. This configuration file contains basic parameters of your cluster, such as host information, network configurations (CNI plugin and Pod and Service CIDR), registry mirrors, add-ons (YAML or Chart) and pluggable component options (if you install KubeSphere). For more information, see [an example configuration file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). With the configuration file in place, you execute the `./kk` command with varied flags for different operations. After that, KubeKey automatically installs Docker and pulls all the necessary images for installation. When the installation is complete, you can inspect installation logs. @@ -38,7 +38,7 @@ With the configuration file in place, you execute the `./kk` command with varied Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. 
```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -54,7 +54,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -69,21 +69,21 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} ## Support Matrix -If you want to use KubeKey to install both Kubernetes and KubeSphere v3.1.1, see the following table of all supported Kubernetes versions. +If you want to use KubeKey to install both Kubernetes and KubeSphere 3.2.1, see the following table of all supported Kubernetes versions. | KubeSphere version | Supported Kubernetes versions | | ------------------ | ------------------------------------------------------------ | -| v3.1.1 | v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9, v1.18.3, v1.18.5, v1.18.6, v1.18.8, v1.19.0, v1.19.8, v1.19.9, v1.20.4, v1.20.6 | +| v3.2.1 | v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental) | {{< notice note >}} - You can also run `./kk version --show-supported-k8s` to see all supported Kubernetes versions that can be installed by KubeKey. -- The Kubernetes versions that can be installed using KubeKey are different from the Kubernetes versions supported by KubeSphere v3.0.0. If you want to [install KubeSphere v3.1.1 on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/), your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. 
+- The Kubernetes versions that can be installed using KubeKey are different from the Kubernetes versions supported by KubeSphere v3.0.0. If you want to [install KubeSphere 3.2.1 on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/), your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). {{}} \ No newline at end of file diff --git a/content/en/docs/installing-on-linux/introduction/multioverview.md b/content/en/docs/installing-on-linux/introduction/multioverview.md index 528874928..1ec072650 100644 --- a/content/en/docs/installing-on-linux/introduction/multioverview.md +++ b/content/en/docs/installing-on-linux/introduction/multioverview.md @@ -107,7 +107,7 @@ Follow the step below to download [KubeKey](../kubekey). Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -123,7 +123,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -138,7 +138,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -162,7 +162,7 @@ Command: {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. 
For more information about supported Kubernetes versions, see [Support Matrix](../kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -177,7 +177,7 @@ Here are some examples for your reference: ./kk create config [-f ~/myfolder/abc.yaml] ``` -- You can specify a KubeSphere version that you want to install (for example, `--with-kubesphere v3.1.1`). +- You can specify a KubeSphere version that you want to install (for example, `--with-kubesphere v3.2.1`). ```bash ./kk create config --with-kubesphere [version] @@ -210,7 +210,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 ``` #### Hosts @@ -285,7 +285,7 @@ KubeKey will install [OpenEBS](https://openebs.io/) to provision [LocalPV](https {{< notice tip >}} - You can enable the multi-cluster feature by editing the configuration file. For more information, see [Multi-cluster Management](../../../multicluster-management/). -- You can also select the components you want to install. For more information, see [Enable Pluggable Components](../../../pluggable-components/). For an example of a complete `config-sample.yaml` file, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +- You can also select the components you want to install. 
For more information, see [Enable Pluggable Components](../../../pluggable-components/). For an example of a complete `config-sample.yaml` file, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). {{}} diff --git a/content/en/docs/installing-on-linux/introduction/vars.md b/content/en/docs/installing-on-linux/introduction/vars.md index 5174c9709..8186aa072 100644 --- a/content/en/docs/installing-on-linux/introduction/vars.md +++ b/content/en/docs/installing-on-linux/introduction/vars.md @@ -10,7 +10,7 @@ When creating a Kubernetes cluster, you can use [KubeKey](../kubekey/) to define ```yaml kubernetes: - version: v1.19.8 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local masqueradeAll: false @@ -45,7 +45,7 @@ The below table describes the above parameters in detail.
- + @@ -116,10 +116,11 @@ The below table describes the above parameters in detail.
versionThe Kubernetes version to be installed. If you do not specify a Kubernetes version, {{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v1.1.0 will install Kubernetes v1.19.8 by default. For more information, see {{< contentLink "docs/installing-on-linux/introduction/kubekey/#support-matrix" "Support Matrix" >}}.The Kubernetes version to be installed. If you do not specify a Kubernetes version, {{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v1.2.1 will install Kubernetes v1.21.5 by default. For more information, see {{< contentLink "docs/installing-on-linux/introduction/kubekey/#support-matrix" "Support Matrix" >}}.
imageRepo
+ {{< notice note >}} - \* By default, KubeKey does not define these parameters in the configuration file while you can manually add them and customize their values. -- `addons` is used to install cloud-native add-ons (YAML or Chart). For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/addons.md). -- This page only lists part of the parameters in the configuration file created by KubeKey. For more information about other parameters, see [this example file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +- `addons` is used to install cloud-native add-ons (YAML or Chart). For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/addons.md). +- This page only lists part of the parameters in the configuration file created by KubeKey. For more information about other parameters, see [this example file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). {{}} diff --git a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md index 7fa89adba..ff3757ac3 100644 --- a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md +++ b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md @@ -32,7 +32,7 @@ Follow the step below to download [KubeKey](../../../installing-on-linux/introdu Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. 
```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -48,7 +48,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -63,7 +63,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. Note that an earlier version of KubeKey cannot be used to install K3s. +The commands above download the latest release (v1.2.1) of KubeKey. Note that an earlier version of KubeKey cannot be used to install K3s. {{}} @@ -78,12 +78,12 @@ chmod +x kk 1. Create a configuration file of your cluster by running the following command: ```bash - ./kk create config --with-kubernetes v1.20.4-k3s --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.4-k3s --with-kubesphere v3.2.1 ``` {{< notice note >}} - KubeKey v1.1.1 only supports the installation of K3s v1.20.4. + KubeKey v1.2.1 supports the installation of K3s v1.21.4. {{}} @@ -115,7 +115,7 @@ chmod +x kk address: "" port: 6443 kubernetes: - version: v1.20.4-k3s + version: v1.21.4-k3s imageRepo: kubesphere clusterName: cluster.local network: @@ -131,7 +131,7 @@ chmod +x kk {{< notice note >}} - For more information about each field in the configuration file, see [an example file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). + For more information about each field in the configuration file, see [an example file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). {{}} @@ -174,10 +174,6 @@ chmod +x kk 5. Access the KubeSphere console at `:30880` with the default account and password (`admin/P@88W0rd`). 
- ![cluster-management](/images/docs/installing-on-linux/on-premises/cluster-management.png) - - ![service-components](/images/docs/installing-on-linux/on-premises/service-components.png) - {{< notice note >}} You can enable pluggable components of KubeSphere after the installation while some features may not be compatible as KubeSphere on K3s is only experimental currently. diff --git a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md index d6b273f1e..9c3154638 100644 --- a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md +++ b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md @@ -199,7 +199,7 @@ Follow the step below to download KubeKey. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -215,7 +215,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -230,7 +230,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -244,15 +244,15 @@ chmod +x kk With KubeKey, you can install Kubernetes and KubeSphere together. You have the option to create a multi-node cluster by customizing parameters in the configuration file. 
-Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.1.1`): +Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.2.1`): ```bash -./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command above, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. 
@@ -286,7 +286,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 ``` Create a cluster using the configuration file you customized above: diff --git a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md index c60284373..d519d587b 100644 --- a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md +++ b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md @@ -289,7 +289,7 @@ systemctl status -l keepalived ## Download KubeKey -[Kubekey](https://github.com/kubesphere/kubekey) is the brand-new installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere v3.1.1. +[Kubekey](https://github.com/kubesphere/kubekey) is the brand-new installer which provides an easy, fast and flexible way to install Kubernetes and KubeSphere 3.2.1. Follow the step below to download KubeKey. @@ -300,7 +300,7 @@ Follow the step below to download KubeKey. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -316,7 +316,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -331,7 +331,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. 
You can change the version number in the command to download a specific version. {{}} @@ -345,15 +345,15 @@ chmod +x kk With KubeKey, you can install Kubernetes and KubeSphere together. You have the option to create a multi-node cluster by customizing parameters in the configuration file. -Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.1.1`): +Create a Kubernetes cluster with KubeSphere installed (for example, `--with-kubesphere v3.2.1`): ```bash -./kk create config --with-kubernetes v1.19.8 --with-kubesphere v3.1.1 +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. 
@@ -396,9 +396,9 @@ spec: domain: lb.kubesphere.local # vip address: "10.10.71.67" - port: "6443" + port: 6443 kubernetes: - version: v1.19.8 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] @@ -429,7 +429,7 @@ metadata: name: ks-installer namespace: kubesphere-system labels: - version: v3.1.1 + version: v3.2.1 spec: local_registry: "" persistence: @@ -454,10 +454,10 @@ spec: elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log - # externalElasticsearchUrl: + # externalElasticsearchHost: # externalElasticsearchPort: console: - enableMultiLogin: false # enable/disable multiple sing on, it allows an account can be used by different users at the same time. + enableMultiLogin: false # enable/disable multiple sign on; it allows a user account to be used by different users at the same time. port: 30880 alerting: # Whether to install KubeSphere alerting system. It enables Users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
enabled: false diff --git a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md index d784d0961..c586cb5f6 100644 --- a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md +++ b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md @@ -119,7 +119,7 @@ Follow the steps below to download [KubeKey](../../../installing-on-linux/introd Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -135,7 +135,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -150,7 +150,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -165,12 +165,12 @@ chmod +x kk 1. Specify a Kubernetes version and a KubeSphere version that you want to install. For example: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. 
For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + - Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -203,9 +203,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -284,11 +284,9 @@ glusterfs (default) kubernetes.io/glusterfs Delete Immediate ### KubeSphere console -1. Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. 3. Go to **Volumes** under **Storage**, and you can see PVCs in use. - - ![volumes-in-use](/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png) {{< notice note >}} @@ -296,6 +294,4 @@ glusterfs (default) kubernetes.io/glusterfs Delete Immediate {{}} -3. On the **Storage Classes** page, you can see the storage class available in your cluster. 
- - ![storage-class-available](/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png) \ No newline at end of file +4. On the **Storage Classes** page, you can see the storage class available in your cluster. \ No newline at end of file diff --git a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md index be4d4986c..9933ed94b 100644 --- a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md +++ b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md @@ -71,7 +71,7 @@ Follow the steps below to download [KubeKey](../../../installing-on-linux/introd Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -87,7 +87,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -102,7 +102,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -117,12 +117,12 @@ chmod +x kk 1. Specify a Kubernetes version and a KubeSphere version that you want to install. 
For example: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + - Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -155,9 +155,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -257,15 +257,11 @@ You can verify that NFS-client has been successfully installed either from the c ### KubeSphere console -1. Log in to the web console as `admin` with the default account and password at `:30880`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the web console as `admin` with the default account and password at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. 
Go to **Pods** in **Application Workloads** and select `kube-system` from the project drop-down list. You can see that the Pod of `nfs-client` is up and running. - ![nfs-pod](/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png) - 3. Go to **Storage Classes** under **Storage**, and you can see available storage classes in your cluster. - - ![nfs-storage-class](/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png) {{< notice note >}} diff --git a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md index be782c80b..7c34f4da7 100644 --- a/content/en/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md +++ b/content/en/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md @@ -73,7 +73,7 @@ Follow the steps below to download [KubeKey](../../../installing-on-linux/introd Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -89,7 +89,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -104,7 +104,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. 
{{}} @@ -119,12 +119,12 @@ chmod +x kk 1. Specify a Kubernetes version and a KubeSphere version that you want to install. For example: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). + - Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -157,9 +157,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -261,15 +261,11 @@ You can verify that QingCloud CSI has been successfully installed either from th ### KubeSphere console -1. Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. 
Log in to the web console with the default account and password (`admin/P@88w0rd`) at `:30880`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Go to **Pods** in **Application Workloads** and select `kube-system` from the project drop-down list. You can see that the Pods of `csi-qingcloud` are up and running. - ![qingcloud-csi-pod](/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png) - 3. Go to **Storage Classes** under **Storage**, and you can see available storage classes in your cluster. - - ![qingcloud-csi-storage-class](/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png) {{< notice note >}} diff --git a/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md b/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md index 613bd38e8..373698114 100644 --- a/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md +++ b/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md @@ -101,7 +101,7 @@ ssh -i .ssh/id_rsa2 -p50200 kubesphere@40.81.5.xx Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -117,7 +117,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -132,7 +132,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. 
You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -142,15 +142,15 @@ The commands above download the latest release (v1.1.1) of KubeKey. You can chan chmod +x kk ``` -2. Create an example configuration file with default configurations. Here Kubernetes v1.20.4 is used as an example. +2. Create an example configuration file with default configurations. Here Kubernetes v1.21.5 is used as an example. ```bash - ./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 + ./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. 
@@ -182,7 +182,7 @@ spec: - node000001 - node000002 ``` -For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +For more information, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). ### Configure the load balancer @@ -194,7 +194,7 @@ In addition to node information, you need to configure your load balancer in the controlPlaneEndpoint: domain: lb.kubesphere.local address: "40.81.5.xx" - port: "6443" + port: 6443 ``` {{< notice note >}} diff --git a/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md b/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md index 8d4ea3af4..114b90068 100644 --- a/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md +++ b/content/en/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md @@ -126,7 +126,7 @@ Follow the step below to download KubeKey. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -142,7 +142,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -157,7 +157,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. 
{{}} @@ -167,15 +167,15 @@ Make `kk` executable: chmod +x kk ``` -Create an example configuration file with default configurations. Here Kubernetes v1.20.4 is used as an example. +Create an example configuration file with default configurations. Here Kubernetes v1.21.5 is used as an example. ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, and v1.20.4. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../../installing-on-linux/introduction/kubekey/#support-matrix). - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later. - If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. @@ -221,7 +221,7 @@ spec: - node3 ``` -For a complete configuration sample explanation, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +For a complete configuration sample explanation, see [this file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). ### Step 4: Configure the load balancer @@ -237,7 +237,7 @@ respectively, and you can refer to the following example. 
controlPlaneEndpoint: domain: lb.kubesphere.local address: "192.168.0.253" - port: "6443" + port: 6443 ``` {{< notice note >}} @@ -327,8 +327,6 @@ Both listeners show that the status is **Active**, meaning nodes are up and runn In the web console of KubeSphere, you can also see that all the nodes are functioning well. -![cluster-node](/images/docs/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png) - To verify if the cluster is highly available, you can turn off an instance on purpose. For example, the above console is accessed through the address `IP: 30880` (the EIP address here is the one bound to the external load balancer). If the cluster is highly available, the console will still work well even if you shut down a master node. ## See Also diff --git a/content/en/docs/introduction/features.md b/content/en/docs/introduction/features.md index ddb855a59..04d72045c 100644 --- a/content/en/docs/introduction/features.md +++ b/content/en/docs/introduction/features.md @@ -11,7 +11,7 @@ weight: 1300 As an [open source container platform](https://kubesphere.io/), KubeSphere provides enterprises with a robust, secure and feature-rich platform, boasting the most common functionalities needed for enterprises adopting Kubernetes, such as multi-cluster deployment and management, network policy configuration, Service Mesh (Istio-based), DevOps projects (CI/CD), security management, Source-to-Image and Binary-to-Image, multi-tenant management, multi-dimensional monitoring, log query and collection, alerting and notification, auditing, application management, and image registry management. -It also supports various open source storage and network solutions, as well as cloud storage services. For example, KubeSphere presents users with a powerful cloud-native tool [PorterLB](https://porterlb.io/), a CNCF-certified load balancer developed for bare metal Kubernetes clusters. 
+It also supports various open source storage and network solutions, as well as cloud storage services. For example, KubeSphere presents users with a powerful cloud-native tool [OpenELB](https://openelb.github.io/), a CNCF-certified load balancer developed for bare metal Kubernetes clusters. With an easy-to-use web console in place, KubeSphere eases the learning curve for users and drives the adoption of Kubernetes. @@ -122,7 +122,7 @@ KubeSphere features a self-updating monitoring system with graphical interfaces - **Third-party compatibility**. KubeSphere is compatible with Prometheus, which is the de facto metrics collection platform for monitoring in Kubernetes environments. Monitoring data can be seamlessly displayed in the web console of KubeSphere. - **Multi-dimensional monitoring at second-level precision**. - - For infrastructure monitoring, the system provides comprehensive metrics such as CPU utilization, memory utilization, CPU load average, disk usage, inode utilization, disk throughput, IOPS, network outbound/inbound rate, Pod status, ETCD service status, and API Server status. + - For infrastructure monitoring, the system provides comprehensive metrics such as CPU utilization, memory utilization, CPU load average, disk usage, inode utilization, disk throughput, IOPS, network outbound/inbound rate, Pod status, etcd service status, and API Server status. - For application resource monitoring, the system provides five key monitoring metrics: CPU utilization, memory consumption, Pod number, network outbound and inbound rate. Besides, users can sort data based on resource consumption and search metics by customizing the time range. In this way, occurring problems can be quickly located so that users can take necessary action. - **Ranking**. Users can sort data by node, workspace and project, which gives them a graphical view of how their resources are running in a straightforward way. - **Component monitoring**. 
It allows users to quickly locate any component failures to avoid unnecessary business downtime. @@ -159,7 +159,7 @@ For more information, please see [Project User Guide](../../project-user-guide/) - Open source network solutions are available such as Calico and Flannel. -- [PorterLB](https://github.com/kubesphere/porter), a load balancer developed for bare metal Kubernetes clusters, is designed by KubeSphere development team. This CNCF-certified tool serves as an important solution for developers. It mainly features: +- [OpenELB](https://github.com/kubesphere/openelb), a load balancer developed for bare metal Kubernetes clusters, is designed by KubeSphere development team. This CNCF-certified tool serves as an important solution for developers. It mainly features: 1. ECMP routing load balancing 2. BGP dynamic routing configuration diff --git a/content/en/docs/introduction/scenarios.md b/content/en/docs/introduction/scenarios.md index 708160185..033416f61 100644 --- a/content/en/docs/introduction/scenarios.md +++ b/content/en/docs/introduction/scenarios.md @@ -100,6 +100,6 @@ With a lightweight, highly scalable microservices architecture offered by KubeSp Sometimes, the cloud is not necessarily the ideal place for the deployment of resources. For example, physical, dedicated servers tend to function better when it comes to the cases that require considerable compute resources and high disk I/O. Besides, for some specialized workloads that are difficult to migrate to a cloud environment, certified hardware and complicated licensing and support agreements may be required. -KubeSphere can help enterprises deploy a containerized architecture on bare metal, load balancing traffic with a physical switch. In this connection, [PorterLB](https://github.com/kubesphere/porter), a CNCF-certified cloud-native tool is born for this end. 
At the same time, KubeSphere, together with QingCloud VPC and QingStor NeonSAN, provides users with a complete set of features ranging from load balancing, container platform building, network management, and storage. This means virtually all aspects of the containerized architecture can be fully controlled and uniformly managed, without sacrificing the performance in virtualization. +KubeSphere can help enterprises deploy a containerized architecture on bare metal, load balancing traffic with a physical switch. In this connection, [OpenELB](https://github.com/kubesphere/openelb), a CNCF-certified cloud-native tool is born for this end. At the same time, KubeSphere, together with QingCloud VPC and QingStor NeonSAN, provides users with a complete set of features ranging from load balancing, container platform building, network management, and storage. This means virtually all aspects of the containerized architecture can be fully controlled and uniformly managed, without sacrificing the performance in virtualization. For detailed information about how KubeSphere drives the development of numerous industries, please see [Case Studies](https://kubesphere.io/case/). diff --git a/content/en/docs/introduction/what's-new-in-3.1.0.md b/content/en/docs/introduction/what's-new-in-3.1.0.md deleted file mode 100644 index 6004cab45..000000000 --- a/content/en/docs/introduction/what's-new-in-3.1.0.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "What's New in 3.1.0" -keywords: 'Kubernetes, KubeSphere, new features' -description: "What's New in 3.1.0" -linkTitle: "What's New in 3.1.0" -weight: 1400 ---- - -On April 29, 2021, the KubeSphere community announced the general availability of KubeSphere 3.1.0. KubeSphere 3.1.0 provides an enabling environment for users as they deploy production workloads not just across clouds but also at the edge. Besides, the new Metering and Billing function helps you better understand the infrastructure operating costs. 
Existing features of multi-cluster management, multi-tenant management, observability, DevOps, application lifecycle management, and microservices governance have also been enhanced as we work to ensure better user experiences across clusters and clouds. - -In addition to the above highlights, KubeSphere 3.1.0 also features other functionality upgrades and fixed the known bugs. There were some deprecated or removed features in 3.1.0. For more and detailed information, see [Release Notes for 3.1.0](../../release/release-v310/). diff --git a/content/en/docs/introduction/what's-new-in-3.2.x.md b/content/en/docs/introduction/what's-new-in-3.2.x.md new file mode 100644 index 000000000..a4a12d304 --- /dev/null +++ b/content/en/docs/introduction/what's-new-in-3.2.x.md @@ -0,0 +1,13 @@ +--- +title: "What's New in 3.2.x" +keywords: 'Kubernetes, KubeSphere, new features' +description: "What's New in 3.2.x" +linkTitle: "What's New in 3.2.x" +weight: 1400 +--- + +On Nov 2, 2021, KubeSphere 3.2.0 was released. GPU resource scheduling and management and GPU usage monitoring further improve the user experience in cloud-native AI scenarios. Moreover, enhanced features such as multi-cluster management, multi-tenant management, observability, DevOps, app store, and service mesh further perfect the interactive design for better user experience. + +In addition to the above highlights, KubeSphere 3.2.0 also features other functionality upgrades and fixed the known bugs. There were some deprecated or removed features in 3.2.0. For more detailed information, see the [3.2.0 GA announcement](../../blog/kubesphere-3.2.0-ga-announcement/). + +On Dec 20, 2021, KubeSphere 3.2.1 was released. It introduced some enhancements, fixed some bugs, and brought better user experience; see the [Release Notes for 3.2.1](../../release/release-v320/) for details.
diff --git a/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md b/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md index 86e8cc855..f7d881ffb 100644 --- a/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md +++ b/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md @@ -6,9 +6,9 @@ titleLink: "Agent Connection" weight: 5220 --- -The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the Host Cluster (H Cluster) cannot access the Member Cluster (M Cluster) directly, you can expose the proxy service address of the H cluster. This enables the M Cluster to connect to the H Cluster through the agent. This method is applicable when the M Cluster is in a private environment (for example, IDC) and the H Cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed across different cloud providers. +The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the host cluster cannot access the member cluster directly, you can expose the proxy service address of the host cluster. This enables the member cluster to connect to the host cluster through the agent. This method is applicable when the member cluster is in a private environment (for example, IDC) and the host cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed across different cloud providers. -To use the multi-cluster feature using an agent, you must have at least two clusters serving as the H Cluster and the M Cluster respectively. A cluster can be defined as the H Cluster or the M Cluster either before or after you install KubeSphere. 
For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). +To use the multi-cluster feature using an agent, you must have at least two clusters serving as the host cluster and the member cluster respectively. A cluster can be defined as the host cluster or the member cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). ## Video Demonstration @@ -34,28 +34,59 @@ If you already have a standalone KubeSphere cluster installed, you can set the v kubectl edit cc ks-installer -n kubesphere-system ``` -In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective: +In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **OK** (if you use the web console) to make it effective: ```yaml multicluster: clusterRole: host ``` -You need to **wait for a while** so that the change can take effect. +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in the YAML file of `ks-installer`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- It is recommended that you set the host cluster name while you are preparing your host cluster. When your host cluster is set up and running with resources deployed, it is not recommended that you set the host cluster name. +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +You need to wait for a while so that the change can take effect. 
{{}} {{< tab "KubeSphere has not been installed" >}} -You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. +You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. + +To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. ```yaml multicluster: clusterRole: host ``` +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + {{< notice note >}} +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. 
+ +{{}} + +{{< notice info >}} + If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a host cluster after KubeSphere is installed. {{}} @@ -93,7 +124,7 @@ tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP {{< notice note >}} -Generally, there is always a LoadBalancer solution in the public cloud, and the external IP can be allocated by the load balancer automatically. If your clusters are running in an on-premises environment, especially a **bare metal environment**, you can use [PorterLB](https://github.com/kubesphere/porter) as the LB solution. +Generally, there is always a LoadBalancer solution in the public cloud, and the external IP can be allocated by the load balancer automatically. If your clusters are running in an on-premises environment, especially a **bare metal environment**, you can use [OpenELB](https://github.com/kubesphere/openelb) as the LB solution. {{}} @@ -180,14 +211,14 @@ authentication: jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU ``` -Scroll down and set the value of `clusterRole` to `member`, then click **Update** (if you use the web console) to make it effective: +Scroll down and set the value of `clusterRole` to `member`, then click **OK** (if you use the web console) to make it effective: ```yaml multicluster: clusterRole: member ``` -You need to **wait for a while** so that the change can take effect. +You need to wait for a while so that the change can take effect. {{}} @@ -224,19 +255,11 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app= ## Import a Member Cluster 1. Log in to the KubeSphere console as `admin` and click **Add Cluster** on the **Cluster Management** page. - - ![add-cluster](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.png) -2. 
Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the top-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. +2. Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the upper-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. - ![cluster-info](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.png) +3. In **Connection Method**, select **Agent connection** and click **Create**. It will show the YAML configuration file for the agent Deployment generated by the host cluster on the console. -3. In **Connection Method**, select **Agent Connection** and click **Create**. It will show the YAML configuration file for the agent Deployment generated by the H Cluster on the console. +4. Create an `agent.yaml` file on the member cluster based on the instruction, then copy and paste the agent deployment to the file. Execute `kubectl create -f agent.yaml` on the node and wait for the agent to be up and running. Please make sure the proxy address is accessible to the member cluster. - ![select-agent-connection](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png) - -4. Create an `agent.yaml` file on the M Cluster based on the instruction, then copy and paste the agent deployment to the file. Execute `kubectl create -f agent.yaml` on the node and wait for the agent to be up and running. Please make sure the proxy address is accessible to the M Cluster. - -5. You can see the cluster you have imported in the H Cluster when the cluster agent is up and running. 
- - ![cluster-imported](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.png) \ No newline at end of file +5. You can see the cluster you have imported in the host cluster when the cluster agent is up and running. \ No newline at end of file diff --git a/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md b/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md index d8c331356..663a0a379 100644 --- a/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md +++ b/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md @@ -6,9 +6,9 @@ titleLink: "Direct Connection" weight: 5210 --- -If the kube-apiserver address of the Member Cluster (M Cluster) is accessible on any node of the Host Cluster (H Cluster), you can adopt **Direction Connection**. This method is applicable when the kube-apiserver address of the M Cluster can be exposed or H Cluster and M Cluster are in the same private network or subnet. +If the kube-apiserver address of the member cluster is accessible on any node of the host cluster, you can adopt **Direct Connection**. This method is applicable when the kube-apiserver address of the member cluster can be exposed or the host cluster and the member cluster are in the same private network or subnet. -To use the multi-cluster feature using direct connection, you must have at least two clusters serving as the H Cluster and the M Cluster respectively. A cluster can be defined as the H Cluster or the M Cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). +To use the multi-cluster feature using direct connection, you must have at least two clusters serving as the host cluster and the member cluster respectively.
A cluster can be defined as the host cluster or the member cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux/) and [Installing on Kubernetes](../../../installing-on-kubernetes/). ## Video Demonstration @@ -34,28 +34,59 @@ If you already have a standalone KubeSphere cluster installed, you can set the v kubectl edit cc ks-installer -n kubesphere-system ``` -In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective: +In the YAML file of `ks-installer`, navigate to `multicluster`, set the value of `clusterRole` to `host`, then click **OK** (if you use the web console) to make it effective: ```yaml multicluster: clusterRole: host ``` -You need to **wait for a while** so that the change can take effect. +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in the YAML file of `ks-installer`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + +{{< notice note >}} + +- It is recommended that you set the host cluster name while you are preparing your host cluster. When your host cluster is set up and running with resources deployed, it is not recommended that you set the host cluster name. +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +You need to wait for a while so that the change can take effect. {{}} {{< tab "KubeSphere has not been installed" >}} -You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. 
If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. +You can define a host cluster before you install KubeSphere either on Linux or on an existing Kubernetes cluster. If you want to [install KubeSphere on Linux](../../../installing-on-linux/introduction/multioverview/#1-create-an-example-configuration-file), you use a `config-sample.yaml` file. If you want to [install KubeSphere on an existing Kubernetes cluster](../../../installing-on-kubernetes/introduction/overview/#deploy-kubesphere), you use two YAML files, one of which is `cluster-configuration.yaml`. + +To set a host cluster, change the value of `clusterRole` to `host` in `config-sample.yaml` or `cluster-configuration.yaml` accordingly before you install KubeSphere. ```yaml multicluster: clusterRole: host ``` +To set the host cluster name, add a field `hostClusterName` under `multicluster.clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml`: + +```yaml +multicluster: + clusterRole: host + hostClusterName: +``` + {{< notice note >}} +- The host cluster name can contain only lowercase letters, numbers, hyphens (-), or periods (.), and must start and end with a lowercase letter or number. + +{{}} + +{{< notice info >}} + If you install KubeSphere on a single-node cluster ([All-in-One](../../../quick-start/all-in-one-on-linux/)), you do not need to create a `config-sample.yaml` file. In this case, you can set a host cluster after KubeSphere is installed. 
{{}} @@ -107,7 +138,7 @@ authentication: jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU ``` -Scroll down and set the value of `clusterRole` to `member`, then click **Update** (if you use the web console) to make it effective: +Scroll down and set the value of `clusterRole` to `member`, then click **OK** (if you use the web console) to make it effective: ```yaml multicluster: @@ -151,23 +182,15 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app= ## Import a Member Cluster 1. Log in to the KubeSphere console as `admin` and click **Add Cluster** on the **Cluster Management** page. - - ![add-cluster](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.png) -2. Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the top-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. +2. Enter the basic information of the cluster to be imported on the **Import Cluster** page. You can also click **Edit Mode** in the upper-right corner to view and edit the basic information in YAML format. After you finish editing, click **Next**. - ![cluster-info](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.png) - -3. In **Connection Method**, select **Direct Connection**, and copy the kubeconfig of the Member Cluster and paste it into the box. You can also click **Edit Mode** in the top-right corner to edit the kubeconfig of the Member Cluster in YAML format. +3. In **Connection Method**, select **Direct connection**, and copy the kubeconfig of the member cluster and paste it into the box. You can also click **Edit Mode** in the upper-right corner to edit the kubeconfig of the member cluster in YAML format. 
{{< notice note >}} -Make sure the `server` address in KubeConfig is accessible on any node of the Host Cluster. +Make sure the `server` address in KubeConfig is accessible on any node of the host cluster. {{}} - - ![kubeconfig](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.png) -4. Click **Create** and wait for cluster initialization to finish. - - ![cluster-imported](/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.png) \ No newline at end of file +4. Click **Create** and wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md index 85e5f8398..d33517a4f 100644 --- a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md +++ b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md @@ -10,14 +10,14 @@ This tutorial demonstrates how to import an Alibaba Cloud Kubernetes (ACK) clust ## Prerequisites -- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the Host Cluster. For more information about how to prepare a Host Cluster, refer to [Prepare a Host Cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). -- You have an ACK cluster with KubeSphere installed to be used as the Member Cluster. +- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have an ACK cluster with KubeSphere installed to be used as the member cluster. 
## Import an ACK Cluster ### Step 1: Prepare the ACK Member Cluster -1. In order to manage the Member Cluster from the Host Cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your Host Cluster. +1. In order to manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your host cluster. ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -33,12 +33,8 @@ This tutorial demonstrates how to import an Alibaba Cloud Kubernetes (ACK) clust 3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. - ![search-config](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png) - 4. Click on the right and then select **Edit YAML** to edit `ks-installer`. - ![click-edit](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png) - 5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes. ```yaml @@ -63,20 +59,12 @@ Log in to the web console of Alibaba Cloud. Go to **Clusters** under **Container ![kubeconfig](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/kubeconfig.png) -### Step 3: Import the ACK Member Cluster +### Step 3: Import the ACK member cluster -1. Log in to the KubeSphere console on your Host Cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. - - ![click-add-cluster](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png) +1. 
Log in to the KubeSphere console on your host cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. 2. Enter the basic information based on your needs and click **Next**. - ![input-info](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png) +3. In **Connection Method**, select **Direct connection**. Fill in the kubeconfig file of the ACK member cluster and then click **Create**. -3. In **Connection Method**, select **Direct Connection**. Fill in the kubeconfig file of the ACK Member Cluster and then click **Create**. - - ![select-method](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png) - -4. Wait for cluster initialization to finish. - - ![ack-cluster-imported](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png) \ No newline at end of file +4. Wait for cluster initialization to finish. \ No newline at end of file diff --git a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md index 8cac5d377..1882a0ac9 100644 --- a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md +++ b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md @@ -10,18 +10,18 @@ This tutorial demonstrates how to import an AWS EKS cluster through the [direct ## Prerequisites -- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the Host Cluster. For more information about how to prepare a Host Cluster, refer to [Prepare a Host Cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). -- You have an EKS cluster to be used as the Member Cluster. 
+- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have an EKS cluster to be used as the member cluster. ## Import an EKS Cluster -### Step 1: Deploy KubeSphere on your EKS Cluster +### Step 1: Deploy KubeSphere on your EKS cluster You need to deploy KubeSphere on your EKS cluster first. For more information about how to deploy KubeSphere on EKS, refer to [Deploy KubeSphere on AWS EKS](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/#install-kubesphere-on-eks). -### Step 2: Prepare the EKS Member Cluster +### Step 2: Prepare the EKS member cluster -1. In order to manage the Member Cluster from the Host Cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your Host Cluster. +1. In order to manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your host cluster. ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -37,12 +37,8 @@ You need to deploy KubeSphere on your EKS cluster first. For more information ab 3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. - ![search-config](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png) - 4. Click on the right and then select **Edit YAML** to edit `ks-installer`. - ![click-edit](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png) - 5. 
In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. Click **Update** to save your changes. ```yaml @@ -164,20 +160,12 @@ You need to deploy KubeSphere on your EKS cluster first. For more information ab ip-10-0-8-148.cn-north-1.compute.internal Ready 78m v1.18.8-eks-7c9bda ``` -### Step 4: Import the EKS Member Cluster +### Step 4: Import the EKS member cluster -1. Log in to the KubeSphere console on your Host Cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. - - ![click-add-cluster](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png) +1. Log in to the KubeSphere console on your host cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. 2. Enter the basic information based on your needs and click **Next**. - ![input-info](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png) +3. In **Connection Method**, select **Direct connection**. Fill in the new kubeconfig file of the EKS member cluster and then click **Create**. -3. In **Connection Method**, select **Direct Connection**. Fill in the new kubeconfig file of the EKS Member Cluster and then click **Create**. - - ![eks-kubeconfig](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png) - -4. Wait for cluster initialization to finish. - - ![eks-overview](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png) \ No newline at end of file +4. Wait for cluster initialization to finish. 
\ No newline at end of file diff --git a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md index e6aae3c9f..c254a43af 100644 --- a/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md +++ b/content/en/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md @@ -10,18 +10,18 @@ This tutorial demonstrates how to import a GKE cluster through the [direct conne ## Prerequisites -- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the Host Cluster. For more information about how to prepare a Host Cluster, refer to [Prepare a Host Cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). -- You have a GKE cluster to be used as the Member Cluster. +- You have a Kubernetes cluster with KubeSphere installed, and prepared this cluster as the host cluster. For more information about how to prepare a host cluster, refer to [Prepare a host cluster](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-host-cluster). +- You have a GKE cluster to be used as the member cluster. ## Import a GKE Cluster -### Step 1: Deploy KubeSphere on your GKE Cluster +### Step 1: Deploy KubeSphere on your GKE cluster You need to deploy KubeSphere on your GKE cluster first. For more information about how to deploy KubeSphere on GKE, refer to [Deploy KubeSphere on GKE](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/). -### Step 2: Prepare the GKE Member Cluster +### Step 2: Prepare the GKE member cluster -1. In order to manage the Member Cluster from the Host Cluster, you need to make `jwtSecret` the same between them. Therefore, get it first by executing the following command on your Host Cluster. +1. To manage the member cluster from the host cluster, you need to make `jwtSecret` the same between them. 
Therefore, get it first by executing the following command on your host cluster. ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -37,12 +37,8 @@ You need to deploy KubeSphere on your GKE cluster first. For more information ab 3. Go to **CRDs**, enter `ClusterConfiguration` in the search bar, and then press **Enter** on your keyboard. Click **ClusterConfiguration** to go to its detail page. - ![search-config](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png) - 4. Click on the right and then select **Edit YAML** to edit `ks-installer`. - ![click-edit](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png) - 5. In the YAML file of `ks-installer`, change the value of `jwtSecret` to the corresponding value shown above and set the value of `clusterRole` to `member`. ```yaml @@ -109,20 +105,12 @@ You need to deploy KubeSphere on your GKE cluster first. For more information ab token: eyJhbGciOiJSUzI1NiIsImtpZCI6InNjOFpIb3RrY3U3bGNRSV9NWV8tSlJzUHJ4Y2xnMDZpY3hhc1BoVy0xTGsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlc3BoZXJlLXRva2VuLXpocmJ3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmVzcGhlcmUiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyMGFmZGI1Ny01MTBkLTRjZDgtYTAwYS1hNDQzYTViNGM0M2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXNwaGVyZS1zeXN0ZW06a3ViZXNwaGVyZSJ9.ic6LaS5rEQ4tXt_lwp7U_C8rioweP-ZdDjlIZq91GOw9d6s5htqSMQfTeVlwTl2Bv04w3M3_pCkvRzMD0lHg3mkhhhP_4VU0LIo4XeYWKvWRoPR2kymLyskAB2Khg29qIPh5ipsOmGL9VOzD52O2eLtt_c6tn-vUDmI_Zw985zH3DHwUYhppGM8uNovHawr8nwZoem27XtxqyBkqXGDD38WANizyvnPBI845YqfYPY5PINPYc9bQBFfgCovqMZajwwhcvPqS6IpG1Qv8TX2lpuJIK0LLjiKaHoATGvHLHdAZxe_zgAC2cT_9Ars3HIN4vzaSX0f-xP--AcRgKVSY9g ``` -### Step 4: Import the GKE 
Member Cluster +### Step 4: Import the GKE member cluster -1. Log in to the KubeSphere console on your Host Cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. - - ![click-add-cluster](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png) +1. Log in to the KubeSphere console on your host cluster as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. On the **Cluster Management** page, click **Add Cluster**. 2. Enter the basic information based on your needs and click **Next**. - ![input-info](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png) +3. In **Connection Method**, select **Direct connection**. Fill in the new kubeconfig file of the GKE member cluster and then click **Create**. -3. In **Connection Method**, select **Direct Connection**. Fill in the new kubeconfig file of the GKE Member Cluster and then click **Create**. - - ![select-method](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png) - -4. Wait for cluster initialization to finish. - - ![gke-cluster-imported](/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png) \ No newline at end of file +4. Wait for cluster initialization to finish. 
\ No newline at end of file diff --git a/content/en/docs/multicluster-management/import-on-prem-k8s/_index.md b/content/en/docs/multicluster-management/import-on-prem-k8s/_index.md deleted file mode 100644 index 8d0aeb228..000000000 --- a/content/en/docs/multicluster-management/import-on-prem-k8s/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -linkTitle: "Import On-premises Kubernetes Clusters" -weight: 5400 - -_build: - render: false ---- diff --git a/content/en/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md b/content/en/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md deleted file mode 100644 index 9370f4355..000000000 --- a/content/en/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Import Kubeadm Kubernetes Cluster" -keywords: 'kubernetes, kubesphere, multicluster, kubeadm' -description: 'Learn how to import a Kubernetes cluster created with kubeadm.' - - -weight: 5410 ---- - -TBD diff --git a/content/en/docs/multicluster-management/introduction/kubefed-in-kubesphere.md b/content/en/docs/multicluster-management/introduction/kubefed-in-kubesphere.md index ffad56b3c..f4dcc4d82 100644 --- a/content/en/docs/multicluster-management/introduction/kubefed-in-kubesphere.md +++ b/content/en/docs/multicluster-management/introduction/kubefed-in-kubesphere.md @@ -1,7 +1,7 @@ --- title: "KubeSphere Federation" keywords: 'Kubernetes, KubeSphere, federation, multicluster, hybrid-cloud' -description: 'Understand the fundamental concept of Kubernetes federation in KubeSphere, including M clusters and H clusters.' +description: 'Understand the fundamental concept of Kubernetes federation in KubeSphere, including member clusters and host clusters.' 
linkTitle: "KubeSphere Federation" weight: 5120 --- @@ -10,11 +10,11 @@ The multi-cluster feature relates to the network connection among multiple clust ## How the Multi-cluster Architecture Works -Before you use the central control plane of KubeSphere to management multiple clusters, you need to create a Host Cluster, also known as **H** Cluster. The H Cluster, essentially, is a KubeSphere cluster with the multi-cluster feature enabled. It provides you with the control plane for unified management of Member Clusters, also known as **M** Cluster. M Clusters are common KubeSphere clusters without the central control plane. Namely, tenants with necessary permissions (usually cluster administrators) can access the control plane from the H Cluster to manage all M Clusters, such as viewing and editing resources on M Clusters. Conversely, if you access the web console of any M Cluster separately, you cannot see any resources on other clusters. +Before you use the central control plane of KubeSphere to management multiple clusters, you need to create a host cluster, also known as **host** cluster. The host cluster, essentially, is a KubeSphere cluster with the multi-cluster feature enabled. It provides you with the control plane for unified management of member clusters, also known as **member** cluster. Member clusters are common KubeSphere clusters without the central control plane. Namely, tenants with necessary permissions (usually cluster administrators) can access the control plane from the host cluster to manage all member clusters, such as viewing and editing resources on member clusters. Conversely, if you access the web console of any member cluster separately, you cannot see any resources on other clusters. -![central-control-plane](/images/docs/multicluster-management/introduction/kubesphere-federation/central-control-plane.png) +There can only be one host cluster while multiple member clusters can exist at the same time. 
In a multi-cluster architecture, the network between the host cluster and member clusters can be [connected directly](../../enable-multicluster/direct-connection/) or [through an agent](../../enable-multicluster/agent-connection/). The network between member clusters can be set in a completely isolated environment. -There can only be one H Cluster while multiple M Clusters can exist at the same time. In a multi-cluster architecture, the network between the H Cluster and M Clusters can be connected directly or through an agent. The network between M Clusters can be set in a completely isolated environment. +If you are using on-premises Kubernetes clusters built through kubeadm, install KubeSphere on your Kubernetes clusters by referring to [Air-gapped Installation on Kubernetes](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/), and then enable KubeSphere multi-cluster management through direct connection or agent connection. ![kubesphere-federation](/images/docs/multicluster-management/introduction/kubesphere-federation/kubesphere-federation.png) @@ -38,12 +38,12 @@ Before you enable multi-cluster management, make sure you have enough resources {{< notice note >}} - The request and limit of CPU and memory resources all refer to single replica. -- After the multi-cluster feature is enabled, tower and controller-manager will be installed on the H Cluster. If you use [agent connection](../../../multicluster-management/enable-multicluster/agent-connection/), only tower is needed for M Clusters. If you use [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/), no additional component is needed for M Clusters. +- After the multi-cluster feature is enabled, tower and controller-manager will be installed on the host cluster. If you use [agent connection](../../../multicluster-management/enable-multicluster/agent-connection/), only tower is needed for member clusters. 
If you use [direct connection](../../../multicluster-management/enable-multicluster/direct-connection/), no additional component is needed for member clusters. {{}} ## Use the App Store in a Multi-cluster Architecture -Different from other components in KubeSphere, the [KubeSphere App Store](../../../pluggable-components/app-store/) serves as a global application pool for all clusters, including H Cluster and M Clusters. You only need to enable the App Store on the H Cluster and you can use functions related to the App Store on M Clusters directly (no matter whether the App Store is enabled on M Clusters or not), such as [app templates](../../../project-user-guide/application/app-template/) and [app repositories](../../../workspace-administration/app-repository/import-helm-repository/). +Different from other components in KubeSphere, the [KubeSphere App Store](../../../pluggable-components/app-store/) serves as a global application pool for all clusters, including host cluster and member clusters. You only need to enable the App Store on the host cluster and you can use functions related to the App Store on member clusters directly (no matter whether the App Store is enabled on member clusters or not), such as [app templates](../../../project-user-guide/application/app-template/) and [app repositories](../../../workspace-administration/app-repository/import-helm-repository/). -However, if you only enable the App Store on M Clusters without enabling it on the H Cluster, you will not be able to use the App Store on any cluster in the multi-cluster architecture. \ No newline at end of file +However, if you only enable the App Store on member clusters without enabling it on the host cluster, you will not be able to use the App Store on any cluster in the multi-cluster architecture. 
\ No newline at end of file diff --git a/content/en/docs/multicluster-management/unbind-cluster.md b/content/en/docs/multicluster-management/unbind-cluster.md index b3326402a..9f5dae030 100644 --- a/content/en/docs/multicluster-management/unbind-cluster.md +++ b/content/en/docs/multicluster-management/unbind-cluster.md @@ -11,20 +11,16 @@ This tutorial demonstrates how to unbind a cluster from the central control plan ## Prerequisites - You have enabled multi-cluster management. -- You need an account granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account. +- You need a user granted a role including the authorization of **Cluster Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to a user. ## Unbind a Cluster -1. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Click **Platform** in the upper-left corner and select **Cluster Management**. -2. On the **Cluster Management** page, click the cluster that you want to remove from the central control plane. - - ![cluster-management](/images/docs/multicluster-management/unbind-a-cluster/cluster-management.png) +2. On the **Cluster Management** page, click the cluster that you want to remove from the control plane. 3. Go to **Basic Information** under **Cluster Settings**, check **I confirm I want to unbind the cluster** and click **Unbind**. - ![unbind-cluster](/images/docs/multicluster-management/unbind-a-cluster/unbind-cluster.png) - {{< notice note >}} After you unbind the cluster, you cannot manage it from the control plane while Kubernetes resources on the cluster will not be deleted. 
diff --git a/content/en/docs/pluggable-components/alerting.md b/content/en/docs/pluggable-components/alerting.md index 0cce528fd..851e365b9 100644 --- a/content/en/docs/pluggable-components/alerting.md +++ b/content/en/docs/pluggable-components/alerting.md @@ -6,9 +6,9 @@ linkTitle: "KubeSphere Alerting" weight: 6600 --- -Alerting is an important building block of observability, closely related to monitoring and logging. The alerting system in KubeSphere, coupled with the proactive failure notification system, allows users to know activities of interest based on alerting policies. When a predefined threshold of a certain metric is reached, an alert will be sent to preconfigured recipients. Therefore, you need to configure the notification method beforehand, including Email, Slack, DingTalk, WeCom and Webhook. With a highly functional alerting and notification system in place, you can quickly identify and resolve potential issues in advance before they affect your business. +Alerting is an important building block of observability, closely related to monitoring and logging. The alerting system in KubeSphere, coupled with the proactive failure notification system, allows users to know activities of interest based on alerting policies. When a predefined threshold of a certain metric is reached, an alert will be sent to preconfigured recipients. Therefore, you need to configure the notification method beforehand, including Email, Slack, DingTalk, WeCom, and Webhook. With a highly functional alerting and notification system in place, you can quickly identify and resolve potential issues in advance before they affect your business. 
-## Enable Alerting before Installation +## Enable Alerting Before Installation ### Installing on Linux @@ -39,9 +39,9 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Alerting first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Alerting first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -57,14 +57,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Alerting after Installation +## Enable Alerting After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. 
@@ -72,9 +72,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `alerting` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `alerting` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml alerting: @@ -89,14 +89,12 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component If you can see **Alerting Messages** and **Alerting Policies** on the **Cluster Management** page, it means the installation is successful as the two parts won't display until the component is installed. 
-![alerting-section](/images/docs/enable-pluggable-components/kubesphere-alerting/alerting-section.png) - diff --git a/content/en/docs/pluggable-components/app-store.md b/content/en/docs/pluggable-components/app-store.md index 279506b9d..1f8eb9601 100644 --- a/content/en/docs/pluggable-components/app-store.md +++ b/content/en/docs/pluggable-components/app-store.md @@ -6,15 +6,13 @@ linkTitle: "KubeSphere App Store" weight: 6200 --- -As an open-source and app-centric container platform, KubeSphere provides users with a Helm-based App Store for application lifecycle management on the back of [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source web-based system to package, deploy and manage different types of apps. The KubeSphere App Store allows ISVs, developers and users to upload, test, deploy and release apps with just several clicks in a one-stop shop. +As an open-source and app-centric container platform, KubeSphere provides users with a Helm-based App Store for application lifecycle management on the back of [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source web-based system to package, deploy and manage different types of apps. The KubeSphere App Store allows ISVs, developers, and users to upload, test, install, and release apps with just several clicks in a one-stop shop. -Internally, the KubeSphere App Store can serve as a place for different teams to share data, middleware, and office applications. Externally, it is conducive to setting industry standards of building and delivery. By default, there are 17 built-in apps in the App Store. After you enable this feature, you can add more apps with app templates. - -![app-store](/images/docs/enable-pluggable-components/kubesphere-app-store/app-store.png) +Internally, the KubeSphere App Store can serve as a place for different teams to share data, middleware, and office applications. Externally, it is conducive to setting industry standards of building and delivery. 
After you enable this feature, you can add more apps with app templates. For more information, see [App Store](../../application-store/). -## Enable the App Store before Installation +## Enable the App Store Before Installation ### Installing on Linux @@ -46,9 +44,9 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the KubeSphere App Store first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the KubeSphere App Store first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -65,14 +63,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable the App Store after Installation +## Enable the App Store After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. 
Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -82,9 +80,9 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml openpitrix: @@ -100,25 +98,23 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component -After you log in to the console, if you can see **App Store** in the top-left corner and 17 built-in apps in it, it means the installation is successful. - -![app-store](/images/docs/enable-pluggable-components/kubesphere-app-store/app-store.png) +After you log in to the console, if you can see **App Store** in the upper-left corner and apps in it, it means the installation is successful. {{< notice note >}} -- You can even access the App Store without logging in to the console by visiting `:30880/apps`. -- Different from previous versions, the **OpenPitrix** tab in KubeSphere 3.1 does not appear on the **Components** page after the App Store is enabled. +- You can even access the App Store without logging in to the console by visiting `:30880/apps`. 
+- The **OpenPitrix** tab in KubeSphere 3.2.x does not appear on the **System Components** page after the App Store is enabled. {{}} ## Use the App Store in a Multi-cluster Architecture -[In a multi-cluster architecture](../../multicluster-management/introduction/kubefed-in-kubesphere/), you have one Host Cluster (H Cluster) managing all Member Clusters (M Clusters). Different from other components in KubeSphere, the App Store serves as a global application pool for all clusters, including H Cluster and M Clusters. You only need to enable the App Store on the H Cluster and you can use functions related to the App Store on M Clusters directly (no matter whether the App Store is enabled on M Clusters or not), such as [app templates](../../project-user-guide/application/app-template/) and [app repositories](../../workspace-administration/app-repository/import-helm-repository/). +[In a multi-cluster architecture](../../multicluster-management/introduction/kubefed-in-kubesphere/), you have one Host Cluster (H Cluster) managing all Member Clusters (M Clusters). Different from other components in KubeSphere, the App Store serves as a global application pool for all clusters, including H Cluster and M Clusters. You only need to enable the App Store on the H Cluster and you can use functions related to the App Store on M Clusters directly (no matter whether the App Store is enabled on M Clusters or not), such as [App Templates](../../project-user-guide/application/app-template/) and [App Repositories](../../workspace-administration/app-repository/import-helm-repository/). However, if you only enable the App Store on M Clusters without enabling it on the H Cluster, you will not be able to use the App Store on any cluster in the multi-cluster architecture. 
diff --git a/content/en/docs/pluggable-components/auditing-logs.md b/content/en/docs/pluggable-components/auditing-logs.md index 63c798ad1..a01c2f57f 100644 --- a/content/en/docs/pluggable-components/auditing-logs.md +++ b/content/en/docs/pluggable-components/auditing-logs.md @@ -1,8 +1,8 @@ --- -title: "KubeSphere Auditing Logs" +title: "KubeSphere Audit Logs" keywords: "Kubernetes, auditing, KubeSphere, logs" description: "Learn how to enable Auditing to document platform events and activities." -linkTitle: "KubeSphere Auditing Logs" +linkTitle: "KubeSphere Audit Logs" weight: 6700 --- @@ -10,7 +10,7 @@ The KubeSphere Auditing Log System provides a security-relevant chronological se For more information, see [Auditing Log Query](../../toolbox/auditing/auditing-query/). -## Enable Auditing Logs before Installation +## Enable Auditing Logs Before Installation ### Installing on Linux @@ -34,7 +34,7 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ``` {{< notice note >}} -By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. +By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. 
{{}} ```yaml @@ -45,7 +45,7 @@ By default, KubeKey will install Elasticsearch internally if Auditing is enabled elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -57,9 +57,9 @@ By default, KubeKey will install Elasticsearch internally if Auditing is enabled ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Auditing first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Auditing first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -73,7 +73,7 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu ``` {{< notice note >}} -By default, ks-installer will install Elasticsearch internally if Auditing is enabled. 
For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. +By default, ks-installer will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -84,21 +84,21 @@ By default, ks-installer will install Elasticsearch internally if Auditing is en elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Auditing Logs after Installation +## Enable Auditing Logs After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. 
+1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -106,9 +106,9 @@ By default, ks-installer will install Elasticsearch internally if Auditing is en A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml auditing: @@ -116,7 +116,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource ``` {{< notice note >}} -By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. +By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Auditing, especially `externalElasticsearchHost` and `externalElasticsearchPort`. 
Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -127,7 +127,7 @@ By default, Elasticsearch will be installed internally if Auditing is enabled. F elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -139,7 +139,7 @@ By default, Elasticsearch will be installed internally if Auditing is enabled. F {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component @@ -148,9 +148,7 @@ You can find the web kubectl tool by clicking }} -Verify that you can use the **Auditing Operating** function from the **Toolbox** in the bottom-right corner. - -![auditing-operating](/images/docs/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png) +Verify that you can use the **Audit Log Search** function from the **Toolbox** in the lower-right corner. {{}} diff --git a/content/en/docs/pluggable-components/devops.md b/content/en/docs/pluggable-components/devops.md index fd06550b4..f8fcc858c 100644 --- a/content/en/docs/pluggable-components/devops.md +++ b/content/en/docs/pluggable-components/devops.md @@ -12,7 +12,7 @@ The DevOps System offers an enabling environment for users as apps can be automa For more information, see [DevOps User Guide](../../devops-user-guide/). 
-## Enable DevOps before Installation +## Enable DevOps Before Installation ### Installing on Linux @@ -43,9 +43,9 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere DevOps first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere DevOps first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -61,14 +61,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable DevOps after Installation +## Enable DevOps After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. 
@@ -78,9 +78,9 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml devops: @@ -95,7 +95,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} @@ -105,9 +105,7 @@ You can find the web kubectl tool by clicking }} -Go to **Components** and check the status of **DevOps**. You may see an image as follows: - -![devops](/images/docs/enable-pluggable-components/kubesphere-devops-system/devops.png) +Go to **System Components** and check that all components on the **DevOps** tab page are in the **Healthy** state. 
{{}} @@ -123,7 +121,7 @@ The output may look as follows if the component runs successfully: ```bash NAME READY STATUS RESTARTS AGE -ks-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m +devops-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m s2ioperator-0 1/1 Running 0 41m ``` diff --git a/content/en/docs/pluggable-components/events.md b/content/en/docs/pluggable-components/events.md index edff6744d..ce7927b25 100644 --- a/content/en/docs/pluggable-components/events.md +++ b/content/en/docs/pluggable-components/events.md @@ -6,11 +6,11 @@ linkTitle: "KubeSphere Events" weight: 6500 --- -KubeSphere events allow users to keep track of what is happening inside a cluster, such as node scheduling status and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. To query events, users can quickly launch the web Toolkit and enter related information in the search bar with different filters (e.g keyword and project) available. Events can also be archived to third-party tools, such as Elasticsearch, Kafka or Fluentd. +KubeSphere events allow users to keep track of what is happening inside a cluster, such as node scheduling status and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. To query events, users can quickly launch the web Toolkit and enter related information in the search bar with different filters (e.g keyword and project) available. Events can also be archived to third-party tools, such as Elasticsearch, Kafka, or Fluentd. For more information, see [Event Query](../../toolbox/events-query/). -## Enable Events before Installation +## Enable Events Before Installation ### Installing on Linux @@ -36,7 +36,7 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ``` {{< notice note >}} -By default, KubeKey will install Elasticsearch internally if Events is enabled. 
For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. +By default, KubeKey will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -47,7 +47,7 @@ By default, KubeKey will install Elasticsearch internally if Events is enabled. elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -59,9 +59,9 @@ By default, KubeKey will install Elasticsearch internally if Events is enabled. ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Events first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. 
+As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Events first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -75,7 +75,7 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu ``` {{< notice note >}} -By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. +By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -86,21 +86,21 @@ By default, ks-installer will install Elasticsearch internally if Events is enab elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. 
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Events after Installation +## Enable Events After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -110,9 +110,9 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml events: @@ -121,7 +121,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -By default, Elasticsearch will be installed internally if Events is enabled. 
For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. +By default, Elasticsearch will be installed internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Events, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -132,7 +132,7 @@ By default, Elasticsearch will be installed internally if Events is enabled. For elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -144,7 +144,7 @@ By default, Elasticsearch will be installed internally if Events is enabled. For {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} @@ -154,9 +154,7 @@ You can find the web kubectl tool by clicking }} -Verify that you can use the **Event Search** function from the **Toolbox** in the bottom-right corner. 
- -![event-search](/images/docs/enable-pluggable-components/kubesphere-events/event-search.png) +Verify that you can use the **Resource Event Search** function from the **Toolbox** in the lower-right corner. {{}} diff --git a/content/en/docs/pluggable-components/kubeedge.md b/content/en/docs/pluggable-components/kubeedge.md index 66523b07e..368263895 100644 --- a/content/en/docs/pluggable-components/kubeedge.md +++ b/content/en/docs/pluggable-components/kubeedge.md @@ -14,7 +14,7 @@ After you enable KubeEdge, you can [add edge nodes to your cluster](../../instal ![kubeedge_arch](/images/docs/enable-pluggable-components/kubeedge/kubeedge_arch.png) -## Enable KubeEdge before Installation +## Enable KubeEdge Before Installation ### Installing on Linux @@ -47,9 +47,9 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeEdge first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeEdge first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -67,14 +67,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 4. 
Save the file and execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable KubeEdge after Installation +## Enable KubeEdge After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -82,7 +82,7 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. 4. In this YAML file, navigate to `kubeedge.enabled` and enable it by setting it to `true`. @@ -91,13 +91,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource enabled: true # Change "false" to "true". ``` -5. Set the value of `kubeedge.cloudCore.cloudHub.advertiseAddress` to the public IP address of your cluster or an IP address that can be accessed by edge nodes. After you finish, click **Update** in the bottom-right corner to save the configuration. - - {{< notice note >}} - -The `kubeedge` section is not included in `cluster-configuration.yaml` if your cluster is upgraded from KubeSphere v3.0.0. For more information, see [how to enable KubeEdge after upgrade](#enable-kubeedge-after-upgrade). - - {{}} +5. 
Set the value of `kubeedge.cloudCore.cloudHub.advertiseAddress` to the public IP address of your cluster or an IP address that can be accessed by edge nodes. After you finish, click **OK** in the lower-right corner to save the configuration. 6. You can use the web kubectl to check the installation process by executing the following command: @@ -107,57 +101,16 @@ The `kubeedge` section is not included in `cluster-configuration.yaml` if your c {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} -## Enable KubeEdge after Upgrade - -If your KubeSphere v3.1.0 cluster is upgraded from KubeSphere v3.0.0, add the following content in `cluster-configuration.yaml` (i.e. the `clusterconfiguration` CRD) and enable `kubeedge` as shown [in the steps above](#enable-kubeedge-after-installation). - -```yaml - kubeedge: - enabled: false - cloudCore: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] - cloudhubPort: "10000" - cloudhubQuicPort: "10001" - cloudhubHttpsPort: "10002" - cloudstreamPort: "10003" - tunnelPort: "10004" - cloudHub: - advertiseAddress: - - "" - nodeLimit: "100" - service: - cloudhubNodePort: "30000" - cloudhubQuicNodePort: "30001" - cloudhubHttpsNodePort: "30002" - cloudstreamNodePort: "30003" - tunnelNodePort: "30004" - edgeWatcher: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] - edgeWatcherAgent: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] -``` - -{{< notice warning >}} - -Do not add the `kubeedge` section in `cluster-configuration.yaml` before the upgrade. - -{{}} - ## Verify the Installation of the Component {{< tabs >}} {{< tab "Verify the component on the dashboard" >}} -On the **Cluster Management** page, verify that the section **Edge Nodes** has appeared under **Node Management**. 
- -![edge-nodes](/images/docs/enable-pluggable-components/kubeedge/edge-nodes.png) +On the **Cluster Management** page, verify that the **Edge Nodes** module has appeared under **Nodes**. {{}} diff --git a/content/en/docs/pluggable-components/logging.md b/content/en/docs/pluggable-components/logging.md index 4de1186de..cc90148e7 100644 --- a/content/en/docs/pluggable-components/logging.md +++ b/content/en/docs/pluggable-components/logging.md @@ -6,11 +6,11 @@ linkTitle: "KubeSphere Logging System" weight: 6400 --- -KubeSphere provides a powerful, holistic and easy-to-use logging system for log collection, query and management. It covers logs at varied levels, including tenants, infrastructure resources, and applications. Users can search logs from different dimensions, such as project, workload, Pod and keyword. Compared with Kibana, the tenant-based logging system of KubeSphere features better isolation and security among tenants as tenants can only view their own logs. Apart from KubeSphere's own logging system, the container platform also allows users to add third-party log collectors, such as Elasticsearch, Kafka and Fluentd. +KubeSphere provides a powerful, holistic, and easy-to-use logging system for log collection, query, and management. It covers logs at varied levels, including tenants, infrastructure resources, and applications. Users can search logs from different dimensions, such as project, workload, Pod and keyword. Compared with Kibana, the tenant-based logging system of KubeSphere features better isolation and security among tenants as tenants can only view their own logs. Apart from KubeSphere's own logging system, the container platform also allows users to add third-party log collectors, such as Elasticsearch, Kafka, and Fluentd. For more information, see [Log Query](../../toolbox/log-query/). 
-## Enable Logging before Installation +## Enable Logging Before Installation ### Installing on Linux @@ -35,10 +35,14 @@ When you install KubeSphere on Linux, you need to create a configuration file, w ```yaml logging: enabled: true # Change "false" to "true". + containerruntime: docker ``` - {{< notice note >}} -By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. + {{< notice info >}}To use containerd as the container runtime, change the value of the field `containerruntime` to `containerd`. If you upgraded to KubeSphere 3.2.1 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling KubeSphere Logging system. + + {{}} + + {{< notice note >}}By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `config-sample.yaml` if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -49,7 +53,7 @@ By default, KubeKey will install Elasticsearch internally if Logging is enabled. elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. 
- externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -61,9 +65,9 @@ By default, KubeKey will install Elasticsearch internally if Logging is enabled. ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Logging first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Logging first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -74,10 +78,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu ```yaml logging: enabled: true # Change "false" to "true". + containerruntime: docker ``` - {{< notice note >}} -By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. 
+ {{< notice info >}}To use containerd as the container runtime, change the value of the field `.logging.containerruntime` to `containerd`. If you upgraded to KubeSphere 3.2.1 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling KubeSphere Logging system. + + {{}} + + {{< notice note >}}By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in `cluster-configuration.yaml` if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one. {{}} ```yaml @@ -88,21 +96,21 @@ By default, ks-installer will install Elasticsearch internally if Logging is ena elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Logging after Installation +## Enable Logging After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. 
Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -112,19 +120,23 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml logging: enabled: true # Change "false" to "true". + containerruntime: docker ``` - {{< notice note >}}By default, Elasticsearch will be installed internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. - + {{< notice info >}}To use containerd as the container runtime, change the value of the field `.logging.containerruntime` to `containerd`. If you upgraded to KubeSphere 3.2.1 from earlier versions, you have to manually add the field `containerruntime` under `logging` when enabling the KubeSphere Logging system. + {{}} + + {{< notice note >}}By default, Elasticsearch will be installed internally if Logging is enabled. 
For a production environment, it is highly recommended that you set the following values in this YAML file if you want to enable Logging, especially `externalElasticsearchHost` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one. + {{}} + ```yaml es: # Storage backend for logging, tracing, events and auditing. elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. elasticsearchDataReplicas: 1 # The total number of data nodes. elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -145,7 +157,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} @@ -155,9 +167,7 @@ You can find the web kubectl tool by clicking }} -Go to **Components** and check the status of **Logging**. You may see an image as follows: - -![logging](/images/docs/enable-pluggable-components/kubesphere-logging-system/logging.png) +Go to **System Components** and check that all components on the **Logging** tab page are in the **Healthy** state. 
{{}} diff --git a/content/en/docs/pluggable-components/metrics-server.md b/content/en/docs/pluggable-components/metrics-server.md index 63bfdfbf3..8a54187e4 100644 --- a/content/en/docs/pluggable-components/metrics-server.md +++ b/content/en/docs/pluggable-components/metrics-server.md @@ -8,7 +8,7 @@ weight: 6910 KubeSphere supports Horizontal Pod Autoscalers (HPA) for [Deployments](../../project-user-guide/application-workloads/deployments/). In KubeSphere, the Metrics Server controls whether the HPA is enabled. You use an HPA object to autoscale a Deployment based on different types of metrics, such as CPU and memory utilization, as well as the minimum and maximum number of replicas. In this way, an HPA helps to make sure your application runs smoothly and consistently in different situations. -## Enable the Metrics Server before Installation +## Enable the Metrics Server Before Installation ### Installing on Linux @@ -39,9 +39,9 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Metrics Server first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Metrics Server first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. 
```bash vi cluster-configuration.yaml @@ -57,7 +57,7 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -67,9 +67,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu If you install KubeSphere on some cloud hosted Kubernetes engines, it is probable that the Metrics Server is already installed in your environment. In this case, it is not recommended that you enable it in `cluster-configuration.yaml` as it may cause conflicts during installation. {{}} -## Enable the Metrics Server after Installation +## Enable the Metrics Server After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -77,9 +77,9 @@ If you install KubeSphere on some cloud hosted Kubernetes engines, it is probabl A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `metrics_server` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. 
In this YAML file, navigate to `metrics_server` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml metrics_server: @@ -94,7 +94,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component diff --git a/content/en/docs/pluggable-components/network-policy.md b/content/en/docs/pluggable-components/network-policy.md index 21ca33e7c..4843e1efe 100644 --- a/content/en/docs/pluggable-components/network-policy.md +++ b/content/en/docs/pluggable-components/network-policy.md @@ -10,14 +10,14 @@ Starting from v3.0.0, users can configure network policies of native Kubernetes {{< notice note >}} -- Please make sure that the CNI network plugin used by the cluster supports Network Policies before you enable the feature. There are a number of CNI network plugins that support Network Policies, including Calico, Cilium, Kube-router, Romana and Weave Net. +- Please make sure that the CNI network plugin used by the cluster supports Network Policies before you enable the feature. There are a number of CNI network plugins that support Network Policies, including Calico, Cilium, Kube-router, Romana, and Weave Net. - It is recommended that you use [Calico](https://www.projectcalico.org/) as the CNI plugin before you enable Network Policies. {{}} For more information, see [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/). 
-## Enable the Network Policy before Installation +## Enable the Network Policy Before Installation ### Installing on Linux @@ -49,9 +49,9 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Network Policy first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable the Network Policy first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -68,14 +68,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable the Network Policy after Installation +## Enable the Network Policy After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. 
Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -83,9 +83,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `network.networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `network.networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml network: @@ -101,11 +101,9 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component -If you can see **Network Policies** in **Network** as the image below, it means the installation succeeds as this part won't display until you install the component. - -![networkpolicy](/images/docs/enable-pluggable-components/network-policies/networkpolicy.png) \ No newline at end of file +If you can see the **Network Policies** module in **Network**, it means the installation is successful as this part won't display until you install the component. 
\ No newline at end of file diff --git a/content/en/docs/pluggable-components/pod-ip-pools.md b/content/en/docs/pluggable-components/pod-ip-pools.md index 995630db7..195278fd5 100644 --- a/content/en/docs/pluggable-components/pod-ip-pools.md +++ b/content/en/docs/pluggable-components/pod-ip-pools.md @@ -1,14 +1,14 @@ --- title: "Pod IP Pools" -keywords: "Kubernetes, KubeSphere, Pod, IP Pools" -description: "Learn how to enable Pod IP Pools to assign a specific Pod IP Pool to your Pods." +keywords: "Kubernetes, KubeSphere, Pod, IP pools" +description: "Learn how to enable Pod IP Pools to assign a specific Pod IP pool to your Pods." linkTitle: "Pod IP Pools" weight: 6920 --- -A Pod IP Pool is used to manage the Pod network address space, and the address space between each Pod IP Pool cannot overlap. When you create a workload, you can select a specific Pod IP Pool, so that created Pods will be assigned IP addresses from this Pod IP Pool. +A Pod IP pool is used to manage the Pod network address space, and the address space between each Pod IP pool cannot overlap. When you create a workload, you can select a specific Pod IP pool, so that created Pods will be assigned IP addresses from this Pod IP pool. -## Enable Pod IP Pools before Installation +## Enable Pod IP Pools Before Installation ### Installing on Linux @@ -21,7 +21,7 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c ``` {{< notice note >}} - If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Pod IP Pools in this mode (for example, for testing purposes), refer to [the following section](#enable-pod-ip-pools-after-installation) to see how Pod IP Pools can be installed after installation. 
+ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Pod IP Pools in this mode (for example, for testing purposes), refer to [the following section](#enable-pod-ip-pools-after-installation) to see how Pod IP pools can be installed after installation. {{}} 2. In this file, navigate to `network.ippool.type` and change `none` to `calico`. Save the file after you finish. @@ -40,9 +40,9 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Pod IP Pools first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Pod IP Pools first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -59,15 +59,15 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. 
Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Pod IP Pools after Installation +## Enable Pod IP Pools After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -75,9 +75,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `network` and change `network.ippool.type` to `calico`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `network` and change `network.ippool.type` to `calico`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml network: @@ -93,14 +93,12 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. 
{{}} ## Verify the Installation of the Component -On the **Cluster Management** page, verify that you can see the section **Pod IP Pools** under **Network**. - -![pod-ip-pool](/images/docs/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png) +On the **Cluster Management** page, verify that you can see the **Pod IP Pools** module under **Network**. diff --git a/content/en/docs/pluggable-components/service-mesh.md b/content/en/docs/pluggable-components/service-mesh.md index 29f2df4d8..364909eaa 100644 --- a/content/en/docs/pluggable-components/service-mesh.md +++ b/content/en/docs/pluggable-components/service-mesh.md @@ -6,11 +6,11 @@ linkTitle: "KubeSphere Service Mesh" weight: 6800 --- -On the basis of [Istio](https://istio.io/), KubeSphere Service Mesh visualizes microservices governance and traffic management. It features a powerful toolkit including **circuit breaking, blue-green deployment, canary release, traffic mirroring, distributed tracing, observability and traffic control**. Developers can easily get started with KubeSphere Service Mesh without any code hacking, with the learning curve of Istio greatly reduced. All features of KubeSphere Service Mesh are designed to meet users' demand for their business. +On the basis of [Istio](https://istio.io/), KubeSphere Service Mesh visualizes microservices governance and traffic management. It features a powerful toolkit including **circuit breaking, blue-green deployment, canary release, traffic mirroring, tracing, observability, and traffic control**. Developers can easily get started with KubeSphere Service Mesh without any code hacking, with the learning curve of Istio greatly reduced. All features of KubeSphere Service Mesh are designed to meet users' demand for their business. For more information, see [Grayscale Release](../../project-user-guide/grayscale-release/overview/). 
-## Enable KubeSphere Service Mesh before Installation +## Enable KubeSphere Service Mesh Before Installation ### Installing on Linux @@ -41,9 +41,9 @@ If you adopt [All-in-One Installation](../../quick-start/all-in-one-on-linux/), ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Service Mesh first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable KubeSphere Service Mesh first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -59,14 +59,14 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable KubeSphere Service Mesh after Installation +## Enable KubeSphere Service Mesh After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. 
Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -74,9 +74,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml servicemesh: @@ -91,7 +91,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. {{}} ## Verify the Installation of the Component @@ -100,9 +100,7 @@ You can find the web kubectl tool by clicking }} -Go to **Components** and check the status of **Istio**. You may see an image as follows: - -![istio](/images/docs/enable-pluggable-components/kubesphere-service-mesh/istio.png) +Go to **System Components** and check that all components on the **Istio** tab page are in **Healthy** state. 
{{}} diff --git a/content/en/docs/pluggable-components/service-topology.md b/content/en/docs/pluggable-components/service-topology.md index aa1c9b1dc..db636cfa6 100644 --- a/content/en/docs/pluggable-components/service-topology.md +++ b/content/en/docs/pluggable-components/service-topology.md @@ -8,7 +8,7 @@ weight: 6915 You can enable Service Topology to integrate [Weave Scope](https://www.weave.works/oss/scope/), a visualization and monitoring tool for Docker and Kubernetes. Weave Scope uses established APIs to collect information to build a topology of your apps and containers. The Service topology displays in your project, providing you with visual representations of connections based on traffic. -## Enable Service Topology before Installation +## Enable Service Topology Before Installation ### Installing on Linux @@ -40,9 +40,9 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c ### Installing on Kubernetes -As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Service Topology first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) file. +As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you can enable Service Topology first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) file. -1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) and edit it. +1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) and edit it. ```bash vi cluster-configuration.yaml @@ -59,15 +59,15 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu 3. 
Execute the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -## Enable Service Topology after Installation +## Enable Service Topology After Installation -1. Log in to the console as `admin`. Click **Platform** in the top-left corner and select **Cluster Management**. +1. Log in to the console as `admin`. Click **Platform** in the upper-left corner and select **Cluster Management**. 2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detail page. @@ -75,9 +75,9 @@ As you [install KubeSphere on Kubernetes](../../installing-on-kubernetes/introdu A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, navigate to `network` and change `network.topology.type` to `weave-scope`. After you finish, click **Update** in the bottom-right corner to save the configuration. +4. In this YAML file, navigate to `network` and change `network.topology.type` to `weave-scope`. After you finish, click **OK** in the lower-right corner to save the configuration. ```yaml network: @@ -93,7 +93,7 @@ A Custom Resource Definition (CRD) allows users to create a new type of resource {{< notice note >}} -You can find the web kubectl tool by clicking in the bottom-right corner of the console. +You can find the web kubectl tool by clicking in the lower-right corner of the console. 
{{}} ## Verify the Installation of the Component @@ -102,9 +102,7 @@ You can find the web kubectl tool by clicking }} -Go to one of your project, navigate to **Services** under **Application Workloads**, and you can see a topology of your **Services** on the **Topology** tab. - -![topology](/images/docs/enable-pluggable-components/service-topology/topology.png) +Go to one of your projects, navigate to **Services** under **Application Workloads**, and you can see a topology of your Services on the **Service Topology** tab page. {{}} diff --git a/content/en/docs/pluggable-components/uninstall-pluggable-components.md b/content/en/docs/pluggable-components/uninstall-pluggable-components.md index caa23e46e..3226d27a2 100644 --- a/content/en/docs/pluggable-components/uninstall-pluggable-components.md +++ b/content/en/docs/pluggable-components/uninstall-pluggable-components.md @@ -1,8 +1,8 @@ --- -title: "Uninstall Pluggable Components from KubeSphere v3.1.x" +title: "Uninstall Pluggable Components from KubeSphere 3.2.x" keywords: "Installer, uninstall, KubeSphere, Kubernetes" -description: "Learn how to uninstall each pluggable component in KubeSphere v3.1.x." -linkTitle: "Uninstall Pluggable Components from KubeSphere v3.1.x" +description: "Learn how to uninstall each pluggable component in KubeSphere 3.2.x." +linkTitle: "Uninstall Pluggable Components from KubeSphere 3.2.x" Weight: 6940 --- @@ -10,7 +10,7 @@ After you [enable the pluggable components of KubeSphere](../../pluggable-compon {{< notice note >}} -The methods of uninstalling certain pluggable components on KubeSphere v3.1.x are different from the methods on KubeSphere v3.0.0. For more information about the uninstallation methods on KubeSphere v3.0.0, see [Uninstall Pluggable Components from KubeSphere](https://v3-0.docs.kubesphere.io/docs/faq/installation/uninstall-pluggable-components/). 
+The methods of uninstalling certain pluggable components on KubeSphere 3.2.x are different from the methods on KubeSphere v3.0.0. For more information about the uninstallation methods on KubeSphere v3.0.0, see [Uninstall Pluggable Components from KubeSphere](https://v3-0.docs.kubesphere.io/docs/faq/installation/uninstall-pluggable-components/). {{}} @@ -40,55 +40,30 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins ## Uninstall KubeSphere DevOps -1. Change the value of `devops.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. - -2. Run the command mentioned in [Prerequisites](#prerequisites) and then delete the code under `status.devops` in `ks-installer` of the CRD `ClusterConfiguration`. - -3. Run the following commands: +1. To uninstall DevOps: ```bash - helm -n kubesphere-devops-system delete ks-jenkins - helm -n kubesphere-devops-system delete uc + helm uninstall -n kubesphere-devops-system devops + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "remove", "path": "/status/devops"}]' + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "replace", "path": "/spec/devops/enabled", "value": false}]' ``` +2. 
To delete DevOps resources: ```bash - # Delete DevOps projects - for devopsproject in `kubectl get devopsprojects -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch devopsprojects $devopsproject -p '{"metadata":{"finalizers":null}}' --type=merge + # Remove all resources related with DevOps + for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do + for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do + for devops_res in $(kubectl get $devops_crd -n $ns -oname); do + kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge + done + done done - - for pip in `kubectl get pipeline -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch pipeline $pip -n `kubectl get pipeline -A | grep $pip | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibinaries in `kubectl get s2ibinaries -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibinaries $s2ibinaries -n `kubectl get s2ibinaries -A | grep $s2ibinaries | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibuilders in `kubectl get s2ibuilders -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibuilders $s2ibuilders -n `kubectl get s2ibuilders -A | grep $s2ibuilders | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibuildertemplates in `kubectl get s2ibuildertemplates -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibuildertemplates $s2ibuildertemplates -n `kubectl get s2ibuildertemplates -A | grep $s2ibuildertemplates | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2iruns in `kubectl get s2iruns -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2iruns $s2iruns -n `kubectl get s2iruns -A | grep $s2iruns | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done 
- - kubectl delete devopsprojects --all 2>/dev/null + # Remove all DevOps CRDs + kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io" | xargs -I crd_name kubectl delete crd crd_name + # Remove DevOps namespace + kubectl delete namespace kubesphere-devops-system ``` - ```bash - kubectl delete ns kubesphere-devops-system - ``` ## Uninstall KubeSphere Logging @@ -97,7 +72,7 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins 2. To disable only log collection: ```bash - delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail + kubectl delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail ``` {{< notice note >}} @@ -118,11 +93,18 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins helm uninstall elasticsearch-logging --namespace kubesphere-logging-system ``` - {{< notice note >}} + {{< notice warning >}} This operation may cause anomalies in Auditing, Events, and Service Mesh. {{}} + +4. Run the following command: + + ```bash + kubectl delete deployment logsidecar-injector-deploy -n kubesphere-logging-system + kubectl delete ns kubesphere-logging-system + ``` ## Uninstall KubeSphere Events @@ -146,7 +128,7 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins {{< notice note >}} - Notification is installed in KubeSphere v3.1.x by default, so you do not need to uninstall it. + Notification is installed in KubeSphere 3.2.1 by default, so you do not need to uninstall it. 
{{}} @@ -159,8 +141,8 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins ```bash helm uninstall kube-auditing -n kubesphere-logging-system - kubectl delete crd awh - kubectl delete crd ar + kubectl delete crd rules.auditing.kubesphere.io + kubectl delete crd webhooks.auditing.kubesphere.io ``` ## Uninstall KubeSphere Service Mesh diff --git a/content/en/docs/project-administration/container-limit-ranges.md b/content/en/docs/project-administration/container-limit-ranges.md index 17a81120f..8fa82fa9d 100644 --- a/content/en/docs/project-administration/container-limit-ranges.md +++ b/content/en/docs/project-administration/container-limit-ranges.md @@ -14,16 +14,14 @@ This tutorial demonstrates how to set default limit ranges for containers in a p ## Prerequisites -You have an available workspace, a project and an account (`project-admin`). The account must have the `admin` role at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +You have an available workspace, a project and a user (`project-admin`). The user must have the `admin` role at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Set Default Limit Ranges -1. Log in to the console as `project-admin` and go to a project. On the **Overview** page, you can see default limit ranges remain unset if the project is newly created. Click **Set** next to **Resource Default Request Not Set** to configure limit ranges. +1. Log in to the console as `project-admin` and go to a project. On the **Overview** page, you can see default limit ranges remain unset if the project is newly created. Click **Edit Quotas** next to **Default Container Quotas Not Set** to configure limit ranges. 2. In the dialog that appears, you can see that KubeSphere does not set any requests or limits by default. 
To set requests and limits to control CPU and memory resources, use the slider to move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. - ![default-limit-range](/images/docs/project-administration/container-limit-ranges/default-limit-range.png) - {{< notice note >}} The limit can never be lower than the request. @@ -34,18 +32,12 @@ You have an available workspace, a project and an account (`project-admin`). The 4. Go to **Basic Information** in **Project Settings**, and you can see default limit ranges for containers in a project. - ![view-limit-ranges](/images/docs/project-administration/container-limit-ranges/view-limit-ranges.png) - -5. To change default limit ranges, click **Manage Project** on the **Basic Information** page and select **Edit Resource Default Request**. +5. To change default limit ranges, click **Edit Project** on the **Basic Information** page and select **Edit Default Container Quotas**. 6. Change limit ranges directly in the dialog and click **OK**. 7. When you create a workload, requests and limits of the container will be pre-populated with values. - - ![workload-values](/images/docs/project-administration/container-limit-ranges/workload-values.png) - {{< notice note >}} - For more information, see **Resource Request** in [Container Image Settings](../../project-user-guide/application-workloads/container-image-settings/). 
{{}} diff --git a/content/en/docs/project-administration/disk-log-collection.md b/content/en/docs/project-administration/disk-log-collection.md index b0b0549c4..0491061c5 100644 --- a/content/en/docs/project-administration/disk-log-collection.md +++ b/content/en/docs/project-administration/disk-log-collection.md @@ -1,26 +1,25 @@ --- -title: "Disk Log Collection" +title: "Log Collection" keywords: 'KubeSphere, Kubernetes, project, disk, log, collection' -description: 'Enable disk log collection so that you can collect, manage, and analyze logs in a unified way.' -linkTitle: "Disk Log Collection" +description: 'Enable log collection so that you can collect, manage, and analyze logs in a unified way.' +linkTitle: "Log Collection" weight: 13600 --- KubeSphere supports multiple log collection methods so that Ops teams can collect, manage, and analyze logs in a unified and flexible way. -This tutorial demonstrates how to collect disk logs for an example app. +This tutorial demonstrates how to collect logs for an example app. ## Prerequisites -- You need to create a workspace, a project and an account (`project-admin`). The account must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-admin`). The user must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). - You need to enable [the KubeSphere Logging System](../../pluggable-components/logging/). -## Enable Disk Log Collection +## Enable Log Collection 1. Log in to the web console of KubeSphere as `project-admin` and go to your project. -2. From the left navigation bar, select **Advanced Settings** in **Project Settings**. 
Under **Disk Log Collection**, click to enable the feature. - +2. From the left navigation bar, click **Log Collection** in **Project Settings**, and then click to enable the feature. ## Create a Deployment @@ -28,15 +27,13 @@ This tutorial demonstrates how to collect disk logs for an example app. 2. In the dialog that appears, set a name for the Deployment (for example, `demo-deployment`) and click **Next**. -3. Under **Container Image**, click **Add Container Image**. +3. Under **Containers**, click **Add Container**. 4. Enter `alpine` in the search bar to use the image (tag: `latest`) as an example. - ![alpine-image](/images/docs/project-administration/disk-log-collection/alpine-image.png) +5. Scroll down to **Start Command** and select the checkbox. Enter the following values for **Command** and **Parameters** respectively, click **√**, and then click **Next**. -5. Scroll down to **Start Command** and select the checkbox. Enter the following values for **Run Command** and **Parameters** respectively, click **√**, and then click **Next**. - - **Run Command** + **Command** ```bash /bin/sh @@ -54,15 +51,11 @@ This tutorial demonstrates how to collect disk logs for an example app. {{}} - ![run-command](/images/docs/project-administration/disk-log-collection/run-command.png) +6. On the **Volume Settings** tab, click to enable **Collect Logs on Volumes** and click **Mount Volume**. -6. On the **Mount Volumes** tab, click to enable **Disk Log Collection** and click **Add Volume**. +7. On the **Temporary Volume** tab, enter a name for the volume (for example, `demo-disk-log-collection`) and set the access mode and path. -7. On the **Temporary Volume** tab, enter a name for the volume (for example, `demo-disk-log-collection`) and set the access mode and path. Refer to the image below as an example. - - ![volume-example](/images/docs/project-administration/disk-log-collection/volume-example.png) - - Click **√**, and then click **Next** to continue. 
+ Click **√**, and then click **Next**. 8. Click **Create** in **Advanced Settings** to finish the process. @@ -76,9 +69,7 @@ This tutorial demonstrates how to collect disk logs for an example app. 1. Under the **Deployments** tab, click the Deployment just created to go to its detail page. -2. In **Resource Status**, you can click to view container details, and then click of `logsidecar-container` (filebeat container) to view disk logs. +2. In **Resource Status**, you can click to view container details, and then click of `logsidecar-container` (filebeat container) to view logs. -3. Alternatively, you can also click in the lower-right corner and select **Log Search** to view stdout logs. For example, use the Pod name of the Deployment for a fuzzy query: - - ![fuzzy-match](/images/docs/project-administration/disk-log-collection/fuzzy-match.png) +3. Alternatively, you can also click in the lower-right corner and select **Log Search** to view stdout logs. For example, use the Pod name of the Deployment for a fuzzy query. diff --git a/content/en/docs/project-administration/project-and-multicluster-project.md b/content/en/docs/project-administration/project-and-multicluster-project.md index d6ab0e1cf..f9ac50c35 100644 --- a/content/en/docs/project-administration/project-and-multicluster-project.md +++ b/content/en/docs/project-administration/project-and-multicluster-project.md @@ -14,7 +14,7 @@ This tutorial demonstrates how to manage projects and multi-cluster projects. ## Prerequisites -- You need to create a workspace and an account (`project-admin`). The account must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../docs/quick-start/create-workspace-and-project/). +- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../docs/quick-start/create-workspace-and-project/). - You must enable the multi-cluster feature through [Direction Connection](../../multicluster-management/enable-multicluster/direct-connection/) or [Agent Connection](../../multicluster-management/enable-multicluster/agent-connection/) before you create a multi-cluster project. ## Projects @@ -25,34 +25,30 @@ This tutorial demonstrates how to manage projects and multi-cluster projects. {{< notice note >}} -- You can change the cluster where the project will be created on the **Cluster** drop-down list. The list is only visible after you enable the multi-cluster feature. +- You can change the cluster where the project will be created on the **Cluster** drop-down menu. The list is only visible after you enable the multi-cluster feature. - If you cannot see the **Create** button, it means no cluster is available to use for your workspace. You need to contact the platform administrator or cluster administrator so that workspace resources can be created in the cluster. [To assign a cluster to a workspace](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/), the platform administrator or cluster administrator needs to edit **Cluster Visibility** on the **Cluster Management** page. {{}} -2. In the **Create Project** window that appears, enter a project name and add an alias or description if necessary. Under **Cluster Settings**, select the cluster where the project will be created (this option does not appear if the multi-cluster feature is not enabled), and click **OK**. +2. In the **Create Project** window that appears, enter a project name and add an alias or description if necessary. Under **Cluster**, select the cluster where the project will be created (this option does not appear if the multi-cluster feature is not enabled), and click **OK**. -3. 
A project created will display in the list as shown below. You can click the project name to go to its **Overview** page. - - ![project-list](/images/docs/project-administration/project-and-multicluster-project/project-list.png) +3. A project created will display in the list. You can click the project name to go to its **Overview** page. ### Edit a project -1. Go to your project, navigate to **Basic Information** under **Project Settings** and click **Manage Project** on the right. +1. Go to your project, navigate to **Basic Information** under **Project Settings** and click **Manage** on the right. 2. Choose **Edit Information** from the drop-down menu. - - ![project-basic-information](/images/docs/project-administration/project-and-multicluster-project/project-basic-information.png) - + {{< notice note >}} The project name cannot be edited. If you want to change other information, see relevant tutorials in the documentation. {{}} -3. To delete a project, choose **Delete Project** from the drop-down menu. In the dialog that appears, enter the project name and click **OK** to confirm the deletion. +3. To delete a project, choose **Delete** from the drop-down menu. In the dialog that appears, enter the project name and click **OK** to confirm the deletion. - {{< notice warning >}} +{{< notice warning >}} A project cannot be recovered once deleted and resources in the project will be removed. @@ -71,20 +67,19 @@ A project cannot be recovered once deleted and resources in the project will be {{}} -2. In the **Create Multi-cluster Project** window that appears, enter a project name and add an alias or description if necessary. Under **Cluster Settings**, select multiple clusters for your project by clicking **Add Cluster**, and click **OK**. +2. In the **Create Multi-cluster Project** window that appears, enter a project name and add an alias or description if necessary. 
Under **Clusters**, select multiple clusters for your project by clicking **Add Cluster**, and then click **OK**. +3. A multi-cluster project created is displayed in the list. Click on the right of a multi-cluster project to select an operation from the drop-down menu: -3. A multi-cluster project created will display in the list as shown below. You can click the project name to go to its **Overview** page. - - ![multi-cluster-list](/images/docs/project-administration/project-and-multicluster-project/multi-cluster-list.png) + - **Edit Information**: Edit the basic information of a multi-cluster project. + - **Add Cluster**: Select a cluster from the drop-down list in the displayed dialog box and click **OK** to add a cluster to a multi-cluster project. + - **Delete**: Delete a multi-cluster project. ### Edit a multi-cluster project -1. Go to your multi-cluster project, navigate to **Basic Information** under **Project Settings** and click **Manage Project** on the right. +1. Go to your multi-cluster project, navigate to **Basic Information** under **Project Settings** and click **Manage** on the right. 2. Choose **Edit Information** from the drop-down menu. - ![multi-cluster-basic-information](/images/docs/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png) - {{< notice note >}} The project name cannot be edited. If you want to change other information, see relevant tutorials in the documentation. @@ -93,7 +88,7 @@ The project name cannot be edited. If you want to change other information, see 3. To delete a multi-cluster project, choose **Delete Project** from the drop-down menu. In the dialog that appears, enter the project name and click **OK** to confirm the deletion. - {{< notice warning >}} +{{< notice warning >}} A multi-cluster project cannot be recovered once deleted and resources in the project will be removed. 
diff --git a/content/en/docs/project-administration/project-gateway.md b/content/en/docs/project-administration/project-gateway.md index 9b05e4402..d87c3bc94 100644 --- a/content/en/docs/project-administration/project-gateway.md +++ b/content/en/docs/project-administration/project-gateway.md @@ -8,38 +8,34 @@ weight: 13500 A gateway in a KubeSphere project is an [NGINX Ingress controller](https://www.nginx.com/products/nginx/kubernetes-ingress-controller). KubeSphere has a built‑in configuration for HTTP load balancing, called [Routes](../../project-user-guide/application-workloads/routes/). A Route defines rules for external connections to Services within a cluster. Users who need to provide external access to their Services create a Route resource that defines rules, including the URI path, backing service name, and other information. -In KubeSphere 3.0, a project gateway works independently for itself. In other words, every project has its own Ingress controller. In the next release, KubeSphere will provide a cluster-scope gateway in addition to the project-scope gateway, allowing all projects to share the same gateway. +In addition to project gateways, KubeSphere also supports [cluster-scope gateway](../../cluster-administration/cluster-settings/cluster-gateway/) to let all projects share a global gateway. -This tutorial demonstrates how to set a gateway in KubeSphere for the external access to Services and Routes. +This tutorial demonstrates how to enable a project gateway on KubeSphere for external access to Services and Routes. ## Prerequisites -You need to create a workspace, a project and an account (`project-admin`). The account must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../docs/quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-admin`). 
The user must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../../docs/quick-start/create-workspace-and-project/). -## Set a Gateway +## Enable a Gateway -1. Log in to the KubeSphere web console as `project-admin` and go to your project. In **Project Settings** from the navigation bar, select **Advanced Settings** and click **Set Gateway**. +1. Log in to the KubeSphere web console as `project-admin` and go to your project. In **Project Settings** from the navigation bar, click **Gateway Settings**. - ![set-project-gateway](/images/docs/project-administration/project-gateway/set-project-gateway.jpg) - -2. In the pop-up window, you can select two access modes for the gateway. - - ![access-method](/images/docs/project-administration/project-gateway/access-method.png) +2. Click **Enable Gateway**. In the pop-up window, you can select two access modes for the gateway. **NodePort**: You can access Services with corresponding node ports through the gateway. **LoadBalancer**: You can access Services with a single IP address through the gateway. -3. You can also enable **Application Governance** on the **Set Gateway** page. You need to enable **Application Governance** so that you can use the Tracing feature and use [different grayscale release strategies](../../project-user-guide/grayscale-release/overview/). Once it is enabled, check whether an annotation (for example, `nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route (Ingress) if the route is inaccessible. +3. You can also enable **Tracing** on the **Enable Gateway** page. You have to turn on **Application Governance** when you create composed applications so that you can use the Tracing feature and use [different grayscale release strategies](../../project-user-guide/grayscale-release/overview/). 
Once it is enabled, check whether an annotation (for example, `nginx.ingress.kubernetes.io/service-upstream: true`) is added for your route (Ingress) if the route is inaccessible. -4. After you select an access method, click **Save**. +3. In **Configuration Options**, add key-value pairs to provide configurations for system components of NGINX Ingress controller. For more information, see [NGINX Ingress Controller documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options). + +4. After you select an access method, click **OK**. ## NodePort If you select **NodePort**, KubeSphere will set a port for http and https requests respectively. You can access your Service at `EIP:NodePort` or `Hostname:NodePort`. -![nodeport](/images/docs/project-administration/project-gateway/nodeport.jpg) - For example, to access your Service with an elastic IP address (EIP), visit: - `http://EIP:32734` @@ -63,10 +59,8 @@ When you create a [Route](../../project-user-guide/application-workloads/routes/ You must configure a load balancer in advance before you select **LoadBalancer**. The IP address of the load balancer will be bound to the gateway to provide access to internal Services and Routes. -![lb](/images/docs/project-administration/project-gateway/lb.png) - {{< notice note >}} -Cloud providers often support load balancer plugins. If you install KubeSphere on major Kubernetes engines on their platforms, you may notice a load balancer is already available in the environment for you to use. If you install KubeSphere in a bare metal environment, you can use [PorterLB](https://github.com/kubesphere/porter) for load balancing. +Cloud providers often support load balancer plugins. If you install KubeSphere on major Kubernetes engines on their platforms, you may notice a load balancer is already available in the environment for you to use. 
If you install KubeSphere in a bare metal environment, you can use [OpenELB](https://github.com/kubesphere/openelb) for load balancing. {{}} \ No newline at end of file diff --git a/content/en/docs/project-administration/project-network-isolation.md b/content/en/docs/project-administration/project-network-isolation.md index 6806efea4..9624aef77 100644 --- a/content/en/docs/project-administration/project-network-isolation.md +++ b/content/en/docs/project-administration/project-network-isolation.md @@ -11,7 +11,7 @@ KubeSphere project network isolation lets project administrators enforce which n ## Prerequisites - You have already enabled [Network Policies](../../pluggable-components/network-policy/). -- You must have an available project and an account of the `admin` role (`project-admin`) at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +- You must have an available project and a user of the `admin` role (`project-admin`) at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). {{< notice note >}} @@ -23,9 +23,7 @@ For the implementation of the Network Policy, you can refer to [KubeSphere Netwo 1. Log in to KubeSphere as `project-admin`. Go to your project and select **Network Isolation** in **Project Settings**. By default, project network isolation is disabled. - ![project-network-isolation](/images/docs/project-administration/project-network-isolation/project-network-isolation.png) - -2. To enable project network isolation, click **On**. +2. To enable project network isolation, click **Enable**. {{< notice note >}} @@ -33,9 +31,8 @@ For the implementation of the Network Policy, you can refer to [KubeSphere Netwo {{}} -3. You can also disable network isolation on this page. +3. You can also disable network isolation by toggling the **Enabled** button on this page. 
- ![isolation-off](/images/docs/project-administration/project-network-isolation/isolation-off.png) {{< notice note >}} @@ -61,21 +58,15 @@ For more information about how to create workloads, see [Deployments](../../proj #### Allow ingress traffic from workloads in a different project -1. On the **Network Isolation** page of your current project, select **Cluster Internal Allowlist**. +1. On the **Network Isolation** page of your current project, click **Internal Allowlist**. -2. Click **Add Allowlist**. +2. Click **Add Allowlist Entry**. -3. Select **Ingress** under **Direction**. +3. Select **Ingress** under **Traffic Direction**. -4. Select the tab **Project** under **Type**. +4. In **Project**, select the project `demo-project-2`. -5. Select the project `demo-project-2`. - - ![ingress-rule](/images/docs/project-administration/project-network-isolation/ingress-rule.png) - -6. Click **OK** and you can see that the project is now in the allowlist. - - ![ingress-rule-added](/images/docs/project-administration/project-network-isolation/ingress-rule-added.png) +5. Click **OK**, and then you can see that the project is now in the allowlist. {{< notice note >}} @@ -85,11 +76,11 @@ If the network is not accessible after you set the network policy, then you need #### Allow egress traffic to Services in a different project -1. On the **Network Isolation** page of your current project, select **Cluster Internal Allowlist**. +1. On the **Network Isolation** page of your current project, click **Internal Allowlist**. -2. Click **Add Allowlist**. +2. Click **Add Allowlist Entry**. -3. Select **Egress** under **Direction**. +3. Select **Egress** under **Traffic Direction**. 4. Select the tab **Service** under **Type**. @@ -97,11 +88,7 @@ If the network is not accessible after you set the network policy, then you need 6. Select the Service that is allowed to receive egress traffic. In this case, select `nginx`. 
- ![engress-rule](/images/docs/project-administration/project-network-isolation/engress-rule.png) - -7. Click **OK** and you can see that the Service is now in the allowlist. - - ![egress-rule-added](/images/docs/project-administration/project-network-isolation/egress-rule-added.png) +7. Click **OK**, and then you can see that the Service is now in the allowlist. {{< notice note >}} @@ -113,21 +100,17 @@ When creating a Service, you must make sure that the selectors of the Service ar KubeSphere uses CIDR to distinguish between peers. Assume a Tomcat Deployment workload has been created in your current project and is exposed via the `NodePort` Service `demo-service` on the NodePort `80` with `TCP`. For an external client with the IP address `192.168.1.1` to access this Service, you need to add a rule for it. -#### Allow ingress traffic from an client outside the cluster +#### Allow ingress traffic from a client outside the cluster -1. On the **Network Isolation** page of your current project, select **Cluster External IP Address** and click **Add Rule**. +1. On the **Network Isolation** page of your current project, select **External Allowlist** and click **Add Allowlist Entry**. -2. Select **Ingress** under **Direction**. +2. Select **Ingress** under **Traffic Direction**. -3. Enter `192.168.1.1/32` for **CIDR**. +3. Enter `192.168.1.1/32` for **Network Segment**. 4. Select the protocol `TCP` and enter `80` as the port number. - ![ingress-CIDR](/images/docs/project-administration/project-network-isolation/ingress-CIDR.png) - -5. Click **OK** and you can see that the rule has been added. - - ![ingress-cidr-set](/images/docs/project-administration/project-network-isolation/ingress-cidr-set.png) +5. Click **OK**, and then you can see that the rule has been added. {{< notice note >}} @@ -139,19 +122,15 @@ Assume the IP address of an external client is `http://10.1.0.1:80`, then you ne #### Allow egress traffic to Services outside the cluster -1. 
On the **Network Isolation** page of your current project, select **Cluster External IP Address** and click **Add Rule**. +1. On the **Network Isolation** page of your current project, select **External Allowlist** and click **Add Allowlist Entry**. -2. Select **Egress** under **Direction**. +2. Select **Egress** under **Traffic Direction**. -3. Enter `10.1.0.1/32` for **CIDR**. +3. Enter `10.1.0.1/32` for **Network Segment**. 4. Select the protocol `TCP` and enter `80` as the port number. - ![egress-CIDR](/images/docs/project-administration/project-network-isolation/egress-CIDR.png) - -5. Click **OK** and you can see that the rule has been added. - - ![egress-CIDR-added](/images/docs/project-administration/project-network-isolation/egress-CIDR-added.png) +5. Click **OK**, and you can see that the rule has been added. {{< notice note >}} @@ -171,9 +150,9 @@ If egress traffic is controlled, you should have a clear plan of what projects, ## FAQs -Q: Why can't the custom monitoring system of KubeSphere get data after I enabled network isolation? +Q: Why cannot the custom monitoring system of KubeSphere get data after I enabled network isolation? -A: After you enabled custom monitoring, the KubeSphere monitoring system will access the metrics of the Pod. You need to allow ingress traffic for the KubeSphere monitoring system. Otherwise, it cannot access Pod metrics. +A: After you enable custom monitoring, the KubeSphere monitoring system will access the metrics of the Pod. You need to allow ingress traffic for the KubeSphere monitoring system. Otherwise, it cannot access Pod metrics. KubeSphere provides a configuration item `allowedIngressNamespaces` to simplify similar configurations, which allows all projects listed in the configuration. @@ -197,7 +176,7 @@ spec: ... ``` -Q: Why can't I access a Service even after setting a network policy through the Service? +Q: Why cannot I access a Service even after setting a network policy through the Service? 
A: When you add a network policy and access the Service via the cluster IP address, if the network is not working, check the kube-proxy configuration to see if `masqueradeAll` is `false`. @@ -222,6 +201,6 @@ A: When you add a network policy and access the Service via the cluster IP addre ... ``` -Q: How do I determine the CIDR when I set the ingress rule? +Q: How do I determine the network segment when I set the ingress rule? A: In Kubernetes, the source IP address of the packet is often handled by NAT, so you need to figure out what the source address of the packet will be before you add the rule. For more information, refer to [Source IP](https://github.com/kubesphere/community/blob/master/sig-network/concepts-and-designs/kubesphere-network-policy.md#source-ip). diff --git a/content/en/docs/project-administration/role-and-member-management.md b/content/en/docs/project-administration/role-and-member-management.md index afc6b8766..8f16a866c 100644 --- a/content/en/docs/project-administration/role-and-member-management.md +++ b/content/en/docs/project-administration/role-and-member-management.md @@ -17,7 +17,7 @@ This tutorial demonstrates how to manage roles and members in a project. At the ## Prerequisites -At least one project has been created, such as `demo-project`. Besides, you need an account of the `admin` role (for example, `project-admin`) at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +At least one project has been created, such as `demo-project`. Besides, you need a user of the `admin` role (for example, `project-admin`) at the project level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Built-in Roles @@ -30,31 +30,29 @@ In **Project Roles**, there are three available built-in roles as shown below. B viewer - The viewer who can view all resources in the project. 
+ Project viewer who can view all resources in the project. operator - The maintainer of the project who can manage resources other than users and roles in the project. + Project operator who can manage resources other than users and roles in the project. admin - The administrator in the project who can perform any action on any resource. It gives full control over all resources in the project. + Project administrator who has full control over all resources in the project. To view the permissions that a role contains: -1. Log in to the console as `project-admin`. In **Project Roles**, click a role (for example, `admin`) and you can see role details as shown below. +1. Log in to the console as `project-admin`. In **Project Roles**, click a role (for example, `admin`) to view the role details. - ![project-role-details](/images/docs/project-administration/role-and-member-management/project-role-details.png) - -2. Click the **Authorized Users** tab to see all the users that are granted the role. +2. Click the **Authorized Users** tab to check users that have been granted the role. ## Create a Project Role 1. Navigate to **Project Roles** under **Project Settings**. -2. In **Project Roles**, click **Create** and set a role **Name** (for example, `project-monitor`). Click **Edit Permissions** to continue. +2. In **Project Roles**, click **Create** and set a role name (for example, `project-monitor`). Click **Edit Permissions** to continue. 3. In the pop-up window, permissions are categorized into different **Modules**. In this example, select **Application Workload Viewing** in **Application Workloads**, and **Alerting Message Viewing** and **Alerting Policy Viewing** in **Monitoring & Alerting**. Click **OK** to finish creating the role. @@ -66,16 +64,12 @@ To view the permissions that a role contains: 4. Newly-created roles will be listed in **Project Roles**. To edit an existing role, click on the right. 
- ![project-role-list](/images/docs/project-administration/role-and-member-management/project-role-list.png) - ## Invite a New Member -1. Navigate to **Project Members** under **Project Settings**, and click **Invite Member**. +1. Navigate to **Project Members** under **Project Settings**, and click **Invite**. 2. Invite a user to the project by clicking on the right of it and assign a role to it. 3. After you add the user to the project, click **OK**. In **Project Members**, you can see the user in the list. 4. To edit the role of an existing user or remove the user from the project, click on the right and select the corresponding operation. - - ![edit-project-account](/images/docs/project-administration/role-and-member-management/edit-project-account.png) diff --git a/content/en/docs/project-user-guide/alerting/alerting-message.md b/content/en/docs/project-user-guide/alerting/alerting-message.md index a67c9af87..507563542 100644 --- a/content/en/docs/project-user-guide/alerting/alerting-message.md +++ b/content/en/docs/project-user-guide/alerting/alerting-message.md @@ -11,18 +11,16 @@ Alerting messages record detailed information of alerts triggered based on the a ## Prerequisites - You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You have created a workload-level alerting policy and an alert has been triggered. 
For more information, refer to [Alerting Policies (Workload Level)](../alerting-policy/). ## View Alerting Messages -1. Log in to the console as `project-regular`, go to your project, and navigate to **Alerting Messages** under **Monitoring & Alerting**. +1. Log in to the console as `project-regular`, go to your project, and go to **Alerting Messages** under **Monitoring & Alerting**. -2. On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and message you have defined in the notification of the alert. To view details of an alerting message, click the name of the alerting policy and click the **Alerting Messages** tab on the page that appears. +2. On the **Alerting Messages** page, you can see all alerting messages in the list. The first column displays the summary and message you have defined in the notification of the alert. To view details of an alerting message, click the name of the alerting policy and click the **Alerting History** tab on the displayed page. - ![alerting-messages](/images/docs/project-user-guide/alerting/alerting-messages/alerting-messages.png) - -3. On the **Alerting Messages** tab, you can see alert severity, target resources, and alert time. +3. On the **Alerting History** tab, you can see alert severity, monitoring targets, and activation time. ## View Notifications diff --git a/content/en/docs/project-user-guide/alerting/alerting-policy.md b/content/en/docs/project-user-guide/alerting/alerting-policy.md index eb3fa3e8f..163a5d3d5 100644 --- a/content/en/docs/project-user-guide/alerting/alerting-policy.md +++ b/content/en/docs/project-user-guide/alerting/alerting-policy.md @@ -12,28 +12,26 @@ KubeSphere provides alerting policies for nodes and workloads. This tutorial dem - You have enabled [KubeSphere Alerting](../../../pluggable-components/alerting/). 
- To receive alert notifications, you must configure a [notification channel](../../../cluster-administration/platform-settings/notification-management/configure-email/) beforehand. -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You have workloads in this project. If they are not ready, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) to create a sample app. ## Create an Alerting Policy -1. Log in to the console as `project-regular` and go to your project. Navigate to **Alerting Policies** under **Monitoring & Alerting**, then click **Create**. +1. Log in to the console as `project-regular` and go to your project. Go to **Alerting Policies** under **Monitoring & Alerting**, then click **Create**. -2. In the dialog that appears, provide the basic information as follows. Click **Next** to continue. +2. In the displayed dialog box, provide the basic information as follows. Click **Next** to continue. - **Name**. A concise and clear name as its unique identifier, such as `alert-demo`. - **Alias**. Help you distinguish alerting policies better. - **Description**. A brief introduction to the alerting policy. - - **Duration (Minutes)**. An alert will be firing when the conditions defined for an alerting policy are met at any given point in the time range. + - **Threshold Duration (min)**. The status of the alerting policy becomes `Firing` when the duration of the condition configured in the alerting rule reaches the threshold. - **Severity**. 
Allowed values include **Warning**, **Error** and **Critical**, providing an indication of how serious an alert is. -3. On the **Alerting Rule** tab, you can use the rule template or create a custom rule. To use the template, fill in the following fields. +3. On the **Rule Settings** tab, you can use the rule template or create a custom rule. To use the template, fill in the following fields. - - **Resource Type**. Select the resource type you want to monitor, such as **Deployment**, **StatefulSet** and **DaemonSet**. - - **Monitoring Target**. Depending on the resource type you select, the target can be different. You cannot see any target if you do not have any workload in the project. - - **Alerting Rules**. Define a rule for the alerting policy. These rules are based on Prometheus expressions and an alert will be triggered when conditions are met. You can monitor objects such as CPU and memory. - - ![rule-template](/images/docs/project-user-guide/alerting/alerting-policies/rule-template.png) + - **Resource Type**. Select the resource type you want to monitor, such as **Deployment**, **StatefulSet**, and **DaemonSet**. + - **Monitoring Targets**. Depending on the resource type you select, the target can be different. You cannot see any target if you do not have any workload in the project. + - **Alerting Rule**. Define a rule for the alerting policy. These rules are based on Prometheus expressions and an alert will be triggered when conditions are met. You can monitor objects such as CPU and memory. {{< notice note >}} @@ -43,24 +41,20 @@ KubeSphere provides alerting policies for nodes and workloads. This tutorial dem Click **Next** to continue. -4. On the **Notification Settings** tab, enter the alert summary and message to be included in your notification, then click **Create**. +4. On the **Message Settings** tab, enter the alert summary and message to be included in your notification, then click **Create**. -5. 
An alerting policy will be **Inactive** when just created. If conditions in the rule expression are met, it will reach **Pending** first, then turn to **Firing** if conditions keep to be met in the given time range.
+5. An alerting policy will be **Inactive** when just created. If conditions in the rule expression are met, it reaches **Pending** first, then turns to **Firing** if the conditions continue to be met in the given time range.
 
 ## Edit an Alerting Policy
 
 To edit an alerting policy after it is created, on the **Alerting Policies** page, click on the right.
 
-1. Click **Edit** from the drop-down menu and edit the alerting policy following the same steps as you create it. Click **Update** on the **Notification Settings** page to save it.
-
-   ![alert-policy-created](/images/docs/project-user-guide/alerting/alerting-policies/alert-policy-created.png)
+1. Click **Edit** from the drop-down menu and edit the alerting policy following the same steps as you create it. Click **OK** on the **Message Settings** page to save it.
 
 2. Click **Delete** from the drop-down menu to delete an alerting policy.
 
 ## View an Alerting Policy
 
-Click an alerting policy on the **Alerting Policies** page to see its detail information, including alerting rules and alerting messages. You can also see the rule expression which is based on the template you use when creating the alerting policy.
+Click an alerting policy on the **Alerting Policies** page to see its detailed information, including alerting rules and alerting history. You can also see the rule expression which is based on the template you use when creating the alerting policy.
 
-Under **Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Notification Settings** displays the customized message you set in notifications. 
- -![alerting-policy-detail](/images/docs/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png) \ No newline at end of file +Under **Alert Monitoring**, the **Alert Monitoring** chart shows the actual usage or amount of resources over time. **Alerting Message** displays the customized message you set in notifications. diff --git a/content/en/docs/project-user-guide/application-workloads/container-image-settings.md b/content/en/docs/project-user-guide/application-workloads/container-image-settings.md index 279f5d061..d844dcc98 100644 --- a/content/en/docs/project-user-guide/application-workloads/container-image-settings.md +++ b/content/en/docs/project-user-guide/application-workloads/container-image-settings.md @@ -1,40 +1,43 @@ --- -title: "Container Image Settings" +title: "Pod Settings" keywords: 'KubeSphere, Kubernetes, image, workload, setting, container' -description: 'Learn different properties on the dashboard in detail as you set container images for your workload.' -linkTitle: "Container Image Settings" +description: 'Learn different properties on the dashboard in detail as you set Pods for your workload.' +linkTitle: "Pod Settings" weight: 10280 --- -When you create Deployments, StatefulSets or DaemonSets, you need to specify a container image. At the same time, KubeSphere provides users with various options to customize workload configurations, such as health check probes, environment variables and start commands. This page illustrates detailed explanations of different properties in **Container Image**. +When you create Deployments, StatefulSets or DaemonSets, you need to specify a Pod. At the same time, KubeSphere provides users with various options to customize workload configurations, such as health check probes, environment variables and start commands. This page illustrates detailed explanations of different properties in **Pod Settings**. 
{{< notice tip >}} -You can enable **Edit Mode** in the upper-right corner to see corresponding values in the manifest file (YAML format) of properties on the dashboard. +You can enable **Edit YAML** in the upper-right corner to see corresponding values in the manifest file (YAML format) of properties on the dashboard. {{}} -## Container Image +## Pod Settings ### Pod Replicas Set the number of replicated Pods by clicking or , indicated by the `.spec.replicas` field in the manifest file. This option is not available for DaemonSets. -![pod-replicas](/images/docs/project-user-guide/application-workloads/container-image-settings/pod-replicas.png) +If you create Deployments in a multi-cluster project, select a replica scheduling mode under **Replica Scheduling Mode**: -### Add Container Image +- **Specify Replicas**: select clusters and set the number of Pod replicas in each cluster. +- **Specify Weights**: select clusters, set the total number of Pod replicas in **Total Replicas**, and specify a weight for each cluster. The Pod replicas will be proportionally scheduled to the clusters according to the weights. To change weights after a Deployment is created, click the name of the Deployment to go to its details page and change weights under **Weights** on the **Resource Status** tab. -After you click **Add Container Image**, you will see an image as below. +If you create StatefulSets in a multi-cluster project, select clusters and set the number of Pod replicas in each cluster under **Pod Replicas**. -![add-container-explan](/images/docs/project-user-guide/application-workloads/container-image-settings/add-image.png) +### Add Container -#### Image Search Bar +Click **Add Container** to add a container. -You can click on the right to select an image from the list or enter an image name to search it. KubeSphere provides Docker Hub images and your private image repository. 
If you want to use your private image repository, you need to create an Image Registry Secret first in **Secrets** under **Configurations**. +#### Image Search Box + +You can click on the right to select an image from the list or enter an image name to search it. KubeSphere provides Docker Hub images and your private image repository. If you want to use your private image repository, you need to create an Image Registry Secret first in **Secrets** under **Configuration**. {{< notice note >}} -Remember to press **Enter** on your keyboard after you enter an image name in the search bar. +Remember to press **Enter** on your keyboard after you enter an image name in the search box. {{}} @@ -48,7 +51,7 @@ The container name is automatically created by KubeSphere, which is indicated by #### Container Type -If you choose **Init Container**, it means the init container will be created for the workload. For more information about init containers, please visit [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/?spm=a2c4g.11186623.2.19.16704b3e9qHXPb). +If you choose **Init container**, it means the init container will be created for the workload. For more information about init containers, please visit [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/?spm=a2c4g.11186623.2.19.16704b3e9qHXPb). #### Resource Request @@ -57,11 +60,9 @@ The resource quota reserved by the container includes both CPU and memory resour - The CPU request is indicated by `.spec.containers[].resources.requests.cpu` in the manifest file. The CPU request can be exceeded. - The memory request is indicated by `.spec.containers[].resources.requests.memory` in the manifest file. The memory request can be exceeded but the container may clear up when node memory is insufficient. 
-![resource-request-limit](/images/docs/project-user-guide/application-workloads/container-image-settings/requests-limits.png) - #### Resource Limit -You can specify the upper limit of the resources that the application can use, including CPU and memory, to prevent excessive resources from being occupied. +You can specify the upper limit of the resources that the application can use, including CPU, memory, and GPU, to prevent excessive resources from being occupied. - The CPU limit is indicated by `.spec.containers[].resources.limits.cpu` in the manifest file. The CPU limit can be exceeded for a short time, and the container will not be stopped. - The memory limit is indicated by `.spec.containers[].resources.limits.memory` in the manifest file. The memory limit cannot be exceeded. If it exceeds, the container may be stopped or scheduled to another machine with sufficient resources. @@ -72,7 +73,9 @@ The CPU resource is measured in CPU units, or **Core** in KubeSphere. The memory {{}} -#### **Port/Service Settings** +To set **GPU Type**, select a GPU type from the drop-down list, which defaults to `nvidia.com/gpu`. **GPU Limit** defaults to no limit. + +#### **Port Settings** You need to set the access protocol for the container as well as port information. To use the default setting, click **Use Default Ports**. @@ -80,67 +83,55 @@ You need to set the access protocol for the container as well as port informatio This value is indicated by the `imagePullPolicy` field. On the dashboard, you can choose one of the following three options from the drop-down list. -![image-pull-policy](/images/docs/project-user-guide/application-workloads/container-image-settings/image-policy.png) +- **Use Local Image First**: It means that the image is pulled only if it does not exist locally. -- **Use Local Image First (ifNotPresent)**: It means that the image is pulled only if it does not exist locally. 
+- **Pull Image Always**: It means that the image is pulled whenever the pod starts. -- **Redownload Image (Always)**: It means that the image is pulled whenever the pod starts. - -- **Only Use Local Image (Never)**: It means that the image is not pulled no matter the image exists or not. +- **Use Local Image Only**: It means that the image is not pulled no matter the image exists or not. {{< notice tip>}} -- The default value is `IfNotPresent`, but the value of images tagged with `:latest` is `Always` by default. +- The default value is **Use Local Image First**, but the value of images tagged with `:latest` is **Pull Image Always** by default. - Docker will check it when pulling the image. If MD5 has not changed, it will not pull. - The `:latest` tag should be avoided as much as possible in the production environment, and the latest image can be automatically pulled by the `:latest` tag in the development environment. {{< /notice >}} -#### **Health Checker** +#### **Health Check** -Support **Liveness**, **Readiness**, and **Startup**. +Support liveness check, readiness check, and startup check. -![container-health-check](/images/docs/project-user-guide/application-workloads/container-image-settings/health-checker.png) +- **Liveness Check**: Liveness probes are used to know whether a container is running, indicated by `livenessProbe`. -- **Container Liveness Check**: Liveness probes are used to know whether a container is running, indicated by `livenessProbe`. +- **Readiness Check**: Readiness probes are used to know whether a container is ready to serve requests, indicated by `readinessProbe`. -- **Container Readiness Check**: Readiness probes are used to know whether a container is ready to serve requests, indicated by `readinessProbe`. +- **Startup Check**: Startup probes are used to know whether a container application has started, indicated by `startupProbe`. 
-- **Container Startup Check**: Startup probes are used to know whether a container application has started, indicated by `startupProbe`. +Liveness, Readiness and Startup Check include the configurations below: -Liveness, Readiness and Startup Check have all included the configurations below: +- **HTTP Request**: Perform an HTTP `Get` request on the specified port and path on the IP address of the container. If the response status code is greater than or equal to 200 and less than 400, the diagnosis is considered successful. The supported parameters include: -- **HTTPGetAction (HTTP Request Check)**: Perform an HTTP `Get` request on the specified port and path on the IP address of the container. If the response status code is greater than or equal to 200 and less than 400, the diagnosis is considered successful. The supported parameters include: - - ![http-request-check](/images/docs/project-user-guide/application-workloads/container-image-settings/http-check.png) - - - **Scheme**: HTTP or HTTPS, specified by `scheme`. - - **Path**: The path to access the HTTP server, specified by `path`. - - **Port**: The access port or port name is exposed by the container. The port number must be between 1 and 65535. The value is specified by `port`. - - **Initial Delays**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. - - **Period Seconds**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. - - **Timeouts**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1. + - **Path**: HTTP or HTTPS, specified by `scheme`, the path to access the HTTP server, specified by `path`, the access port or port name is exposed by the container. The port number must be between 1 and 65535. The value is specified by `port`. 
+ - **Initial Delay (s)**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. + - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. + - **Timeout (s)**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1. - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1. - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1. -- **TCPSocketAction (TCP Port Check)**: Perform a TCP check on the specified port on the IP address of the container. If the port is open, the diagnosis is considered successful. The supported parameters include: - - ![tcp-port-check](/images/docs/project-user-guide/application-workloads/container-image-settings/tcp-port.png) +- **TCP Port**: Perform a TCP check on the specified port on the IP address of the container. If the port is open, the diagnosis is considered successful. The supported parameters include: - **Port**: The access port or port name is exposed by the container. The port number must be between 1 and 65535. The value is specified by `port`. - - **Initial Delays**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. - - **Period Seconds**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. 
+ - **Initial Delay (s)**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. + - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. - **Timeouts**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1. - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1. - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1. -- **ExecAction (Exec Command Check)**: Execute the specified command in the container. If the return code is 0 when the command exits, the diagnosis is considered successful. The supported parameters include: - - ![exec-command-check](/images/docs/project-user-guide/application-workloads/container-image-settings/exec-check.png) +- **Command**: Execute the specified command in the container. If the return code is 0 when the command exits, the diagnosis is considered successful. The supported parameters include: - **Command**: A detection command used to detect the health of the container, specified by `exec.command`. - - **Initial Delays**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. - - **Period Seconds**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. + - **Initial Delay (s)**: The number of seconds after the container has started before liveness probes are initiated, specified by `initialDelaySeconds`. It defaults to 0. 
+ - **Check Interval (s)**: The probe frequency (in seconds), specified by `periodSeconds`. It defaults to 10. The minimum value is 1. - **Timeouts**: The number of seconds after which the probe times out, specified by `timeoutSeconds`. It defaults to 1. The minimum value is 1. - **Success Threshold**: The minimum consecutive successes for the probe to be considered successful after having failed, specified by `successThreshold`. It defaults to 1 and must be 1 for liveness and startup. The minimum value is 1. - **Failure Threshold**: The minimum consecutive failures for the probe to be considered failed after having succeeded, specified by `failureThreshold`. It defaults to 3. The minimum value is 1. @@ -149,11 +140,9 @@ Liveness, Readiness and Startup Check have all included the configurations below #### **Start Command** -By default, the container runs the default image command. +By default, a container runs the default image command. -![start-command](/images/docs/project-user-guide/application-workloads/container-image-settings/start-command.png) - -- **Run Command** refers to the `command` field of containers in the manifest file. +- **Command** refers to the `command` field of containers in the manifest file. - **Parameters** refers to the `args` field of containers in the manifest file. For more information about the command, please visit [Define a Command and Arguments for a Container](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/). @@ -162,11 +151,9 @@ For more information about the command, please visit [Define a Command and Argum Configure environment variables for Pods in the form of key-value pairs. -![envi-var](/images/docs/project-user-guide/application-workloads/container-image-settings/env-variables.png) - - name: The name of the environment variable, specified by `env.name`. - value: The value of the variable referenced, specified by `env.value`. -- type: The type of environment variables. 
It supports customization, configuration items, keys, and variable/variable references. +- Click **Use ConfigMap or Secret** to use an existing ConfigMap or Secret. For more information about the command, please visit [Pod variable](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/?spm=a2c4g.11186623.2.20.16704b3e9qHXPb). @@ -174,9 +161,7 @@ For more information about the command, please visit [Pod variable](https://kube A security context defines privilege and access control settings for a Pod or Container. For more information about the security context, please visit [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). -![security-context](/images/docs/project-user-guide/application-workloads/container-image-settings/container-security-context.png) - -#### **Sync Host Timezone** +#### **Synchronize Host Timezone** The time zone of the container will be consistent with that of the host after synchronization. @@ -190,13 +175,13 @@ Update strategies are different for different workloads. {{< tab "Deployments" >}} -The `.spec.strategy` field specifies the strategy used to replace old Pods with new ones. `.spec.strategy.type` can be `Recreate` or `RollingUpdate`. `RollingUpdate` is the default value. +The `.spec.strategy` field specifies the strategy used to replace old Pods with new ones. `.spec.strategy.type` can be `Recreate` or `Rolling Update`. `Rolling Update` is the default value. -- **RollingUpdate (Recommended)** +- **Rolling Update (recommended)** A rolling update means the instance of the old version will be gradually replaced with new ones. During the upgrade process, the traffic will be load balanced and distributed to the old and new instances simultaneously, so the service will not be interrupted. -- **Recreate** +- **Simultaneous Update** All existing Pods will be killed before new ones are created. 
Please note that the service will be interrupted during the update process. @@ -208,11 +193,11 @@ For more information about update strategies, please visit [Strategy in Deployme The drop-down menu under **Update Strategy** is indicated by the `.spec.updateStrategy` field of a StatefulSet in the manifest file. It allows you to handle updates of Pod containers, tags, resource requests or limits, and annotations. There are two strategies: -- **RollingUpdate (Recommended)** +- **Rolling Update (recommended)** If `.spec.template` is updated, the Pods in the StatefulSet will be automatically deleted with new pods created as replacements. Pods are updated in reverse ordinal order, sequentially deleted and created. A new Pod update will not begin until the previous Pod becomes up and running after it is updated. -- **OnDelete** +- **Update on Deletion** If `.spec.template` is updated, the Pods in the StatefulSet will not be automatically updated. You need to manually delete old Pods so that the controller can create new Pods. @@ -224,11 +209,11 @@ For more information about update strategies, please visit [StatefulSet Update S The drop-down menu under **Update Strategy** is indicated by the `.spec.updateStrategy` field of a DaemonSet in the manifest file. It allows you to handle updates of Pod containers, tags, resource requests or limits, and annotations. There are two strategies: -- **RollingUpdate (Recommended)** +- **Rolling Update (recommended)** If `.spec.template` is updated, old DaemonSet pods will be killed with new pods created automatically in a controlled fashion. At most one pod of the DaemonSet will be running on each node during the whole update process. -- **OnDelete** +- **Update on Deletion** If `.spec.template` is updated, new DaemonSet pods will only be created when you manually delete old DaemonSet pods. This is the same behavior of DaemonSets in Kubernetes version 1.5 or before. 
@@ -238,31 +223,31 @@ For more information about update strategies, please visit [DaemonSet Update Str {{}} -### The Number of Pods When Updated +### Rolling Update Settings {{< tabs >}} {{< tab "Deployments" >}} -**The number of Pods when updated** in a Deployment is different from that of a StatefulSet. +**Rolling Update Settings** in a Deployment is different from that of a StatefulSet. -- **The maximum unavailable number of Pods**: The maximum number of Pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 25%. -- **The maximum surge number of Pods**: The maximum number of Pods that can be scheduled above the desired number of Pods, specified by `maxSurge`. The default value is 25%. +- **Maximum Unavailable Pods**: The maximum number of Pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 25%. +- **Maximum Extra Pods**: The maximum number of Pods that can be scheduled above the desired number of Pods, specified by `maxSurge`. The default value is 25%. {{}} {{< tab "StatefulSets" >}} -When you partition an update, all Pods with an ordinal greater than or equal to the value you set in Partition are updated when you update the StatefulSet’s Pod specification. This field is specified by `.spec.updateStrategy.rollingUpdate.partition`, whose default value is 0. For more information about partitions, please visit [Partitions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions). +**Ordinal for Dividing Pod Replicas**: When you partition an update, all Pods with an ordinal greater than or equal to the value you set in Partition are updated when you update the StatefulSet’s Pod specification. This field is specified by `.spec.updateStrategy.rollingUpdate.partition`, whose default value is 0. For more information about partitions, please visit [Partitions](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions). 
{{}} {{< tab "DaemonSets" >}} -**The number of Pods when updated** in a DaemonSet is different from that of a StatefulSet. +**Rolling Update Settings** in a DaemonSet is different from that of a StatefulSet. -- **The maximum unavailable number of Pods**: The maximum number of pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 20%. -- **MinReadySeconds**: The minimum number of seconds before a newly created Pod of DaemonSet is treated as available, specified by `minReadySeconds`. The default value is 0. +- **Maximum Unavailable Pods**: The maximum number of pods that can be unavailable during the update, specified by `maxUnavailable`. The default value is 20%. +- **Minimum Running Time for Pod Readiness (s)**: The minimum number of seconds before a newly created Pod of DaemonSet is treated as available, specified by `minReadySeconds`. The default value is 0. {{}} @@ -272,11 +257,12 @@ When you partition an update, all Pods with an ordinal greater than or equal to A security context defines privilege and access control settings for a Pod or Container. For more information about Pod Security Policies, please visit [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). -### Deployment Mode +### Pod Scheduling Rules -You can select different deployment modes to switch between inter-pod affinity and inter-pod anti-affinity. In Kubernetes, inter-pod affinity is specified as field `podAffinity` of field `affinity` while inter-pod anti-affinity is specified as field `podAntiAffinity` of field `affinity`. In KubeSphere, both `podAffinity` and `podAntiAffinity` are set to `preferredDuringSchedulingIgnoredDuringExecution`. You can enable **Edit Mode** in the upper-right corner to see field details. +You can select different deployment modes to switch between inter-pod affinity and inter-pod anti-affinity. 
In Kubernetes, inter-pod affinity is specified as field `podAffinity` of field `affinity` while inter-pod anti-affinity is specified as field `podAntiAffinity` of field `affinity`. In KubeSphere, both `podAffinity` and `podAntiAffinity` are set to `preferredDuringSchedulingIgnoredDuringExecution`. You can enable **Edit YAML** in the upper-right corner to see field details. -- **Pod Decentralized Deployment** represents anti-affinity. -- **Pod Aggregation Deployment** represents affinity. +- **Decentralized Scheduling** represents anti-affinity. +- **Centralized Scheduling** represents affinity. +- **Custom Rules** is to add custom scheduling rules based on your needs. For more information about affinity and anti-affinity, please visit [Pod affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity). diff --git a/content/en/docs/project-user-guide/application-workloads/cronjobs.md b/content/en/docs/project-user-guide/application-workloads/cronjobs.md index 33bf2e289..0e7c0ba9a 100644 --- a/content/en/docs/project-user-guide/application-workloads/cronjobs.md +++ b/content/en/docs/project-user-guide/application-workloads/cronjobs.md @@ -1,7 +1,7 @@ --- title: "CronJobs" -keywords: "KubeSphere, Kubernetes, jobs, cronjobs" -description: "Learn basic concepts of CronJobs and how to create CronJobs in KubeSphere." +keywords: "KubeSphere, Kubernetes, Jobs, CronJobs" +description: "Learn basic concepts of CronJobs and how to create CronJobs on KubeSphere." linkTitle: "CronJobs" weight: 10260 --- @@ -12,7 +12,7 @@ For more information, see [the official documentation of Kubernetes](https://kub ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). 
+You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a CronJob @@ -20,13 +20,9 @@ You need to create a workspace, a project and an account (`project-regular`). Th Log in to the console as `project-regular`. Go to **Jobs** of a project, choose **CronJobs** and click **Create**. -![cronjob-list](/images/docs/project-user-guide/application-workloads/cronjobs/click-create.png) - ### Step 2: Enter basic information -Enter the basic information. You can refer to the image below for each field. When you finish, click **Next**. - -![cronjob-create-basic-info](/images/docs/project-user-guide/application-workloads/cronjobs/basic-info.png) +Enter the basic information. You can refer to the instructions below for each field. When you finish, click **Next**. - **Name**: The name of the CronJob, which is also the unique identifier. - **Alias**: The alias name of the CronJob, making resources easier to identify. @@ -39,45 +35,39 @@ Enter the basic information. You can refer to the image below for each field. Wh | Every Week | `0 0 * * 0` | | Every Month | `0 0 1 * *` | -- **Advanced Settings (Execution Parameters)**: +- **Advanced Settings**: - - **staringDeadlineSeconds**. Specified by `.spec.startingDeadlineSeconds` in the manifest file, this optional field represents the maximum number of seconds that a ConJob can take to start if it misses the scheduled time for any reason. CronJobs that have missed executions will be counted as failed ones. If you do not specify this field, there is no deadline for the CronJob. - - **successfulJobsHistoryLimit**. Specified by `.spec.successfulJobsHistoryLimit` in the manifest file, this field represents the number of successful CronJob executions to retain. 
This is a pointer to distinguish between explicit zero and not specified. It defaults to 3.
-  - **failedJobsHistoryLimit**. Specified by `.spec.failedJobsHistoryLimit` in the manifest file, this field represents the number of failed CronJob executions to retain. This is a pointer to distinguish between explicit zero and not specified. It defaults to 1.
-  - **concurrencyPolicy**. Specified by `.spec.concurrencyPolicy`, it represents how to treat concurrent executions of a Job. Valid values are:
-    - **Allow** (default): It allows CronJobs to run concurrently.
-    - **Forbid**: It forbids concurrent runs, skipping the next run if the previous run hasn't finished yet.
-    - **Replace**: It cancels currently running Job and replaces it with a new one.
+  - **Maximum Start Delay (s)**. Specified by `.spec.startingDeadlineSeconds` in the manifest file, this optional field represents the maximum number of seconds that a CronJob can take to start if it misses the scheduled time for any reason. CronJobs that have missed executions will be counted as failed ones. If you do not specify this field, there is no deadline for the CronJob.
+  - **Successful Jobs Retained**. Specified by `.spec.successfulJobsHistoryLimit` in the manifest file, this field represents the number of successful CronJob executions to retain. This is a pointer to distinguish between explicit zero and not specified. It defaults to 3.
+  - **Failed Jobs Retained**. Specified by `.spec.failedJobsHistoryLimit` in the manifest file, this field represents the number of failed CronJob executions to retain. This is a pointer to distinguish between explicit zero and not specified. It defaults to 1.
+  - **Concurrency Policy**. Specified by `.spec.concurrencyPolicy`, it represents how to treat concurrent executions of a Job:
+    - **Run Jobs concurrently** (default): Run CronJobs concurrently.
+    - **Skip new Job**: Forbid concurrent runs and skip the next run if the previous run hasn't finished yet.
+ - **Skip old Job**: Cancel the currently running Job and replace it with a new one. {{< notice note >}} -You can enable **Edit Mode** in the top-right corner to see the YAML manifest of this CronJob. +You can enable **Edit YAML** in the upper-right corner to see the YAML manifest of this CronJob. {{}} -### Step 3: ConJob settings (Optional) +### Step 3: Strategy settings (Optional) -Please refer to [Jobs](../jobs/#step-3-job-settings-optional). +Please refer to [Jobs](../jobs/#step-3-strategy-settings-optional). -### Step 4: Set an image +### Step 4: Set a Pod -1. Click **Add Container Image** in **Container Image**, enter `busybox` in the search bar, and press **Enter**. - - ![input-busybox](/images/docs/project-user-guide/application-workloads/cronjobs/set-image.png) +1. Click **Add Container** in **Containers**, enter `busybox` in the search box, and press **Enter**. 2. Scroll down to **Start Command** and enter `/bin/sh,-c,date; echo "KubeSphere!"` in the box under **Parameters**. - ![start-command](/images/docs/project-user-guide/application-workloads/cronjobs/start-command.png) - 3. Click **√** to finish setting the image and **Next** to continue. - ![finish-image](/images/docs/project-user-guide/application-workloads/cronjobs/image-set.png) - {{< notice note >}} -- This example CronJob prints `KubeSphere`. For more information about setting images, see [Container Image Settings](../container-image-settings/). +- This example CronJob prints `KubeSphere`. For more information about setting images, see [Pod Settings](../container-image-settings/). - For more information about **Restart Policy**, see [Jobs](../jobs/#step-4-set-image).
+- You can skip **Volume Settings** and **Advanced Settings** for this tutorial. For more information, see [Mount Volumes](../deployments/#step-4-mount-volumes) and [Configure Advanced Settings](../deployments/#step-5-configure-advanced-settings) in Deployments. {{}} @@ -85,51 +75,31 @@ Please refer to [Jobs](../jobs/#step-3-job-settings-optional). 1. In the final step of **Advanced Settings**, click **Create** to finish. A new item will be added to the CronJob list if the creation is successful. Besides, you can also find Jobs under **Jobs** tab. - ![cronjob-list-new](/images/docs/project-user-guide/application-workloads/cronjobs/cronjob.png) +2. Under the **CronJobs** tab, click this CronJob and go to the **Job Records** tab where you can see the information of each execution record. There are 3 successful CronJob executions as the field **Successful Jobs Retained** is set to 3. - ![job-list](/images/docs/project-user-guide/application-workloads/cronjobs/jobs.png) +3. Click any of them and you will be directed to the Job details page. -2. Under the **ConJobs** tab, click this CronJob and go to the **Job Records** tab where you can see the information of each execution record. There are 3 successful CronJob executions as the field **successfulJobsHistoryLimit** is set to 3. - - ![execution-record](/images/docs/project-user-guide/application-workloads/cronjobs/exe-records.png) - -3. Click any of them and you will be directed to the Job detail page. - - ![job-detail-page](/images/docs/project-user-guide/application-workloads/cronjobs/job-detail.png) - -4. In **Resource Status**, you can inspect the Pod status. Click on the right and check the container log as shown below, which displays the expected output. - - ![container-log-1](/images/docs/project-user-guide/application-workloads/cronjobs/view-log.png) - - ![container-log-2](/images/docs/project-user-guide/application-workloads/cronjobs/log-detail.png) +4. In **Resource Status**, you can inspect the Pod status.
Click on the right to check the container log, which displays the expected output. ## Check CronJob Details ### Operations -On the CronJob detail page, you can manage the CronJob after it is created. +On the CronJob details page, you can manage the CronJob after it is created. - **Edit Information**: Edit the basic information except `Name` of the CronJob. - **Pause/Start**: Pause or start the Cronjob. Pausing a CronJob will tell the controller to suspend subsequent executions, which does not apply to executions that already start. - **Edit YAML**: Edit the CronJob's specification in YAML format. - **Delete**: Delete the CronJob, and return to the CronJob list page. -![cronjob-action](/images/docs/project-user-guide/application-workloads/cronjobs/modify.png) - ### Job records Click the **Job Records** tab to view the records of the CronJob. -![job-records](/images/docs/project-user-guide/application-workloads/cronjobs/job-records.png) - ### Metadata Click the **Metadata** tab to view the labels and annotations of the CronJob. -![metadata](/images/docs/project-user-guide/application-workloads/cronjobs/metadata.png) - ### Events Click the **Events** tab to view the events of the CronJob. - -![events](/images/docs/project-user-guide/application-workloads/cronjobs/events.png) diff --git a/content/en/docs/project-user-guide/application-workloads/daemonsets.md b/content/en/docs/project-user-guide/application-workloads/daemonsets.md index c01b99063..0c3159e77 100644 --- a/content/en/docs/project-user-guide/application-workloads/daemonsets.md +++ b/content/en/docs/project-user-guide/application-workloads/daemonsets.md @@ -1,5 +1,5 @@ --- -title: "DaemonSets" +title: "Kubernetes DaemonSets in KubeSphere" keywords: 'KubeSphere, Kubernetes, DaemonSet, workload' description: 'Learn basic concepts of DaemonSets and how to create DaemonSets in KubeSphere.'
linkTitle: "DaemonSets" @@ -10,7 +10,7 @@ A DaemonSet manages groups of replicated Pods while it ensures that all (or some For more information, see the [official documentation of Kubernetes](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/). -## Use DaemonSets +## Use Kubernetes DaemonSets DaemonSets are very helpful in cases where you want to deploy ongoing background tasks that run on all or certain nodes without any user intervention. For example: @@ -20,7 +20,7 @@ DaemonSets are very helpful in cases where you want to deploy ongoing background ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a DaemonSet @@ -28,44 +28,34 @@ You need to create a workspace, a project and an account (`project-regular`). Th Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the tab **DaemonSets**. -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/click-create.png) - ### Step 2: Enter basic information Specify a name for the DaemonSet (for example, `demo-daemonset`) and click **Next** to continue. -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/basic-info.png) +### Step 3: Set a Pod -### Step 3: Set an image +1. Click **Add Container**. -1. Click **Add Container Image**. - - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/click-add-image.png) - -2. 
Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `fluentd` in the search bar and press **Enter**. - - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/enter-image.png) +2. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `fluentd` in the search box and press **Enter**. {{< notice note >}} -- Remember to press **Enter** on your keyboard after you enter an image name in the search bar. -- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configurations**. +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. +- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. {{}} 3. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). - ![daemonset-request-limit](/images/docs/project-user-guide/application-workloads/daemonsets/set-requests-limits.png) - 4. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. 5. Select a policy for image pulling from the drop-down menu. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). -6. For other settings (**Health Checker**, **Start Command**, **Environment Variables**, **Container Security Context** and **Sync Host Timezone**), you can configure them on the dashboard as well. 
For more information, see detailed explanations of these properties in [Container Image Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. +6. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. -7. Select an update strategy from the drop-down menu. It is recommended you choose **RollingUpdate**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). +7. Select an update strategy from the drop-down menu. It is recommended you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). -8. Select a deployment mode. For more information, see [Deployment Mode](../container-image-settings/#deployment-mode). +8. Select a Pod scheduling rule. For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). 9. Click **Next** to continue when you finish setting the container image. @@ -73,8 +63,6 @@ Specify a name for the DaemonSet (for example, `demo-daemonset`) and click **Nex You can add a volume directly or mount a ConfigMap or Secret. Alternatively, click **Next** directly to skip this step. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/mount-volumes.png) - {{< notice note >}} DaemonSets can't use a volume template, which is used by StatefulSets. @@ -85,52 +73,40 @@ DaemonSets can't use a volume template, which is used by StatefulSets. You can add metadata in this section. 
When you finish, click **Create** to complete the whole process of creating a DaemonSet. -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/advanced-settings.png) - - **Add Metadata** Additional metadata settings for resources such as **Labels** and **Annotations**. -## Check DaemonSet Details +## Check Kubernetes DaemonSet Details -### Detail page +### Details page -1. After a DaemonSet is created, it will be displayed in the list as below. You can click on the right and select the options from the menu to modify a DaemonSet. +1. After a DaemonSet is created, it will be displayed in the list. You can click on the right and select the options from the menu to modify a DaemonSet. - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/click-dots.png) - - - **Edit**: View and edit the basic information. + - **Edit Information**: View and edit the basic information. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy the DaemonSet. + - **Re-create**: Re-create the DaemonSet. - **Delete**: Delete the DaemonSet. -2. Click the name of the DaemonSet and you can go to its detail page. - - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/detail-page.png) +2. Click the name of the DaemonSet and you can go to its details page. 3. Click **More** to display what operations about this DaemonSet you can do. - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/click-more.png) - - - **Revision Rollback**: Select the revision to roll back. - - **Edit Config Template**: Configure update strategies, containers and volumes. + - **Roll Back**: Select the revision to roll back. + - **Edit Settings**: Configure update strategies, containers and volumes. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy this DaemonSet. + - **Re-create**: Re-create this DaemonSet. 
- **Delete**: Delete the DaemonSet, and return to the DaemonSet list page. 4. Click the **Resource Status** tab to view the port and Pod information of a DaemonSet. - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/resource-status.png) - - **Replica Status**: You cannot change the number of Pod replicas for a DaemonSet. - - **Pod detail** - - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/pod-detail.png) + - **Pods** - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). - You can view the container information by clicking a Pod item. - Click the container log icon to view output logs of the container. - - You can view the Pod detail page by clicking the Pod name. + - You can view the Pod details page by clicking the Pod name. ### Revision records @@ -140,17 +116,11 @@ After the resource template of workload is changed, a new log will be generated Click the **Metadata** tab to view the labels and annotations of the DaemonSet. -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/metadata.png) - ### Monitoring 1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the DaemonSet. - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/monitoring.png) - -2. Click the drop-down menu in the upper-right corner to customize the time range and time interval. - - ![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/click-time-interval.png) +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. 3. Click / in the upper-right corner to start/stop automatic data refreshing. @@ -160,11 +130,8 @@ Click the **Metadata** tab to view the labels and annotations of the DaemonSet. Click the **Environment Variables** tab to view the environment variables of the DaemonSet. 
-![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/env-variables.png) - ### Events Click the **Events** tab to view the events of the DaemonSet. -![daemonsets](/images/docs/project-user-guide/application-workloads/daemonsets/events.png) diff --git a/content/en/docs/project-user-guide/application-workloads/deployments.md b/content/en/docs/project-user-guide/application-workloads/deployments.md index cb6d87893..cba033e6a 100644 --- a/content/en/docs/project-user-guide/application-workloads/deployments.md +++ b/content/en/docs/project-user-guide/application-workloads/deployments.md @@ -13,7 +13,7 @@ For more information, see the [official documentation of Kubernetes](https://kub ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a Deployment @@ -21,61 +21,47 @@ You need to create a workspace, a project and an account (`project-regular`). Th Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the tab **Deployments**. -![deployments](/images/docs/project-user-guide/application-workloads/deployments/deployments.png) - ### Step 2: Enter basic information Specify a name for the Deployment (for example, `demo-deployment`) and click **Next** to continue. -![deployments](/images/docs/project-user-guide/application-workloads/deployments/enter-info.png) - -### Step 3: Set an image +### Step 3: Set a Pod 1. 
Before you set an image, define the number of replicated Pods in **Pod Replicas** by clicking or , which is indicated by the `.spec.replicas` field in the manifest file. {{< notice tip >}} -You can see the Deployment manifest file in YAML format by enabling **Edit Mode** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Deployment. Alternatively, you can follow the steps below to create a Deployment via the dashboard. +You can see the Deployment manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Deployment. Alternatively, you can follow the steps below to create a Deployment via the dashboard. {{}} - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/set-replicas.png) +2. Click **Add Container**. -2. Click **Add Container Image**. - - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/add-container-image.png) - -3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search bar and press **Enter**. - - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/enter-nginx.png) +3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search box and press **Enter**. {{< notice note >}} -- Remember to press **Enter** on your keyboard after you enter an image name in the search bar. -- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configurations**. +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. 
+- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. {{}} 4. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/set-requests-limits.png) - 5. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. -6. Select a policy for image pulling from the drop-down menu. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). +6. Select a policy for image pulling from the drop-down list. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). -7. For other settings (**Health Checker**, **Start Command**, **Environment Variables**, **Container Security Context** and **Sync Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Container Image Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. +7. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. -8. Select an update strategy from the drop-down menu. It is recommended that you choose **RollingUpdate**. 
For more information, see [Update Strategy](../container-image-settings/#update-strategy). +8. Select an update strategy from the drop-down menu. It is recommended that you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). -9. Select a deployment mode. For more information, see [Deployment Mode](../container-image-settings/#deployment-mode). +9. Select a Pod scheduling rule. For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). -10. Click **Next** to continue when you finish setting the container image. +10. Click **Next** to continue when you finish setting the Pod. ### Step 4: Mount volumes You can add a volume directly or mount a ConfigMap or Secret. Alternatively, click **Next** directly to skip this step. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). -![deployments](/images/docs/project-user-guide/application-workloads/deployments/mount-volumes.png) - {{< notice note >}} Deployments can't use a volume template, which is used by StatefulSets. @@ -86,11 +72,9 @@ Deployments can't use a volume template, which is used by StatefulSets. You can set a policy for node scheduling and add metadata in this section. When you finish, click **Create** to complete the whole process of creating a Deployment. -![deployments](/images/docs/project-user-guide/application-workloads/deployments/advanced-settings.png) +- **Select Nodes** -- **Set Node Scheduling Policy** - - You can allow Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. + Assign Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. - **Add Metadata** @@ -98,65 +82,49 @@ You can set a policy for node scheduling and add metadata in this section. When ## Check Deployment Details -### Detail page +### Details page -1. After a Deployment is created, it will be displayed in the list as below. 
You can click on the right and select options from the menu to modify your Deployment. +1. After a Deployment is created, it will be displayed in the list. You can click on the right and select options from the menu to modify your Deployment. - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/click-to-modify.png) - - - **Edit**: View and edit the basic information. + - **Edit Information**: View and edit the basic information. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy the Deployment. + - **Re-create**: Re-create the Deployment. - **Delete**: Delete the Deployment. -2. Click the name of the Deployment and you can go to its detail page. - - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/deploy-details.png) +2. Click the name of the Deployment and you can go to its details page. 3. Click **More** to display the operations about this Deployment you can do. - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/click-more.png) - - - **Revision Rollback**: Select the revision to roll back. - - **Horizontal Pod Autoscaling**: Autoscale the replicas according to CPU and memory usage. If both CPU and memory are specified, replicas are added or deleted if any of the conditions is met. - - **Edit Config Template**: Configure update strategies, containers and volumes. + - **Roll Back**: Select the revision to roll back. + - **Edit Autoscaling**: Autoscale the replicas according to CPU and memory usage. If both CPU and memory are specified, replicas are added or deleted if any of the conditions is met. + - **Edit Settings**: Configure update strategies, containers and volumes. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy this Deployment. + - **Re-create**: Re-create this Deployment. - **Delete**: Delete the Deployment, and return to the Deployment list page. 4. 
Click the **Resource Status** tab to view the port and Pod information of the Deployment. - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/resource-status.png) - - - **Replica Status**: Click or to increase or decrease the number of Pod replicas. - - **Pod detail** - - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/pods.png) + - **Replica Status**: Click or to increase or decrease the number of Pod replicas. + - **Pods** - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). - You can view the container information by clicking a Pod item. - Click the container log icon to view output logs of the container. - - You can view the Pod detail page by clicking the Pod name. + - You can view the Pod details page by clicking the Pod name. ### Revision records After the resource template of workload is changed, a new log will be generated and Pods will be rescheduled for a version update. The latest 10 versions will be saved by default. You can implement a redeployment based on the change log. -### Matadata +### Metadata Click the **Metadata** tab to view the labels and annotations of the Deployment. -![deployments](/images/docs/project-user-guide/application-workloads/deployments/meta-data.png) - ### Monitoring 1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the Deployment. - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/monitoring.png) - -2. Click the drop-down menu in the upper-right corner to customize the time range and time interval. - - ![deployments](/images/docs/project-user-guide/application-workloads/deployments/time-interval.png) +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. 3. Click / in the upper-right corner to start/stop automatic data refreshing. 
@@ -166,10 +134,6 @@ Click the **Metadata** tab to view the labels and annotations of the Deployment. Click the **Environment Variables** tab to view the environment variables of the Deployment. -![deployments](/images/docs/project-user-guide/application-workloads/deployments/env-variables.png) - ### Events -Click the **Events** tab to view the events of the Deployment. - -![deployments](/images/docs/project-user-guide/application-workloads/deployments/events.png) \ No newline at end of file +Click the **Events** tab to view the events of the Deployment. \ No newline at end of file diff --git a/content/en/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md b/content/en/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md index bb9fe21d8..262e2587a 100755 --- a/content/en/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md +++ b/content/en/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md @@ -15,7 +15,7 @@ This document uses HPA based on CPU usage as an example. Operations for HPA base ## Prerequisites - You need to [enable the Metrics Server](https://kubesphere.io/docs/pluggable-components/metrics-server/). -- You need to create a workspace, a project and an account (for example, `project-regular`). `project-regular` must be invited to the project and assigned the `operator` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](/docs/quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (for example, `project-regular`). `project-regular` must be invited to the project and assigned the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](/docs/quick-start/create-workspace-and-project/). ## Create a Service @@ -23,19 +23,11 @@ This document uses HPA based on CPU usage as an example. Operations for HPA base 2. 
Choose **Services** in **Application Workloads** on the left navigation bar and click **Create** on the right. - ![create-service](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png) - 3. In the **Create Service** dialog box, click **Stateless Service**. - ![stateless-service](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png) - 4. Set the Service name (for example, `hpa`) and click **Next**. - ![service-name](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png) - -5. Click **Add Container Image**, set **Image** to `mirrorgooglecontainers/hpa-example` and click **Use Default Ports**. - - ![add-container-image](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png) +5. Click **Add Container**, set **Image** to `mirrorgooglecontainers/hpa-example` and click **Use Default Ports**. 6. Set the CPU request (for example, 0.15 cores) for each container, click **√**, and click **Next**. @@ -46,28 +38,22 @@ This document uses HPA based on CPU usage as an example. Operations for HPA base {{}} - ![cpu-request](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png) - -7. Click **Next** on the **Mount Volumes** tab and click **Create** on the **Advanced Settings** tab. +7. Click **Next** on the **Volume Settings** tab and click **Create** on the **Advanced Settings** tab. ## Configure Kubernetes HPA -1. Choose **Deployments** in **Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. +1. Select **Deployments** in **Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. - ![hpa-deployment](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png) - -2. 
Click **More** and choose **Horizontal Pod Autoscaling** from the drop-down list. - - ![horizontal-pod-autoscaling](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png) +2. Click **More** and select **Edit Autoscaling** from the drop-down menu. 3. In the **Horizontal Pod Autoscaling** dialog box, configure the HPA parameters and click **OK**. - * **CPU Target Utilization**: Target percentage of the average Pod CPU request. - * **Memory Target Usage**: Target average Pod memory usage in MiB. - * **Min Replicas Number**: Minimum number of Pods. - * **Max Replicas Number**: Maximum number of Pods. + * **Target CPU Usage (%)**: Target percentage of the average Pod CPU request. + * **Target Memory Usage (MiB)**: Target average Pod memory usage in MiB. + * **Minimum Replicas**: Minimum number of Pods. + * **Maximum Replicas**: Maximum number of Pods. - In this example, **CPU Target Utilization** is set to `60`, **Min Replicas Number** is set to `1`, and **Max Replicas Number** is set to `10`. + In this example, **Target CPU Usage (%)** is set to `60`, **Minimum Replicas** is set to `1`, and **Maximum Replicas** is set to `10`. {{< notice note >}} @@ -75,49 +61,29 @@ This document uses HPA based on CPU usage as an example. Operations for HPA base {{}} - ![hpa-parameters](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png) - ## Verify HPA This section uses a Deployment that sends requests to the HPA Service to verify that HPA automatically adjusts the number of Pods to meet the resource usage target. ### Create a load generator Deployment -1. Choose **Workloads** in **Application Workloads** on the left navigation bar and click **Create** on the right. - - ![create-deployment](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png) +1. 
Select **Workloads** in **Application Workloads** on the left navigation bar and click **Create** on the right. 2. In the **Create Deployment** dialog box, set the Deployment name (for example, `load-generator`) and click **Next**. - ![deployment-name](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png) +3. Click **Add Container** and set **Image** to `busybox`. -3. Click **Add Container Image** and set **Image** to `busybox`. - - ![busybox](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png) - -4. Scroll down in the dialog box, select **Start Command**, and set **Run Command** to `sh,-c` and **Parameters** to `while true; do wget -q -O- http://..svc.cluster.local; done` (for example, `while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`). - - ![start-command](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png) +4. Scroll down in the dialog box, select **Start Command**, and set **Command** to `sh,-c` and **Parameters** to `while true; do wget -q -O- http://..svc.cluster.local; done` (for example, `while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`). 5. Click **√** and click **Next**. -6. Click **Next** on the **Mount Volumes** tab and click **Create** on the **Advanced Settings** tab. +6. Click **Next** on the **Volume Settings** tab and click **Create** on the **Advanced Settings** tab. ### View the HPA Deployment status -1. After the load generator Deployment is created, choose **Workloads** in **Application Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. +1. After the load generator Deployment is created, go to **Workloads** in **Application Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. 
The number of Pods displayed on the page automatically increases to meet the resource usage target. - The number of Pods automatically increases to meet the resource usage target. - - ![target-cpu-utilization](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png) - - ![pods-increase](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png) - -2. Choose **Workloads** in **Application Workloads** on the left navigation bar, click on the right of the load generator Deployment (for example, load-generator-v1), and choose **Delete** from the drop-down list. After the load-generator Deployment is deleted, check the status of the HPA Deployment again. - - The number of Pods decreases to the minimum. - - ![pods-decrease](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png) +2. Choose **Workloads** in **Application Workloads** on the left navigation bar, click on the right of the load generator Deployment (for example, load-generator-v1), and choose **Delete** from the drop-down list. After the load-generator Deployment is deleted, check the status of the HPA Deployment again. The number of Pods decreases to the minimum. {{< notice note >}} @@ -133,7 +99,6 @@ You can repeat steps in [Configure HPA](#configure-hpa) to edit the HPA configur 1. Choose **Workloads** in **Application Workloads** on the left navigation bar and click the HPA Deployment (for example, hpa-v1) on the right. -2. Click on the right of **Horizontal Pod Autoscaling** and choose **Cancel** from the drop-down list. +2. Click on the right of **Autoscaling** and choose **Cancel** from the drop-down list. 
- ![cancel-hpa](/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png) diff --git a/content/en/docs/project-user-guide/application-workloads/jobs.md b/content/en/docs/project-user-guide/application-workloads/jobs.md index 195ce4d2c..b60d34657 100644 --- a/content/en/docs/project-user-guide/application-workloads/jobs.md +++ b/content/en/docs/project-user-guide/application-workloads/jobs.md @@ -1,7 +1,7 @@ --- title: "Jobs" -keywords: "KubeSphere, Kubernetes, docker, jobs" -description: "Learn basic concepts of Jobs and how to create Jobs in KubeSphere." +keywords: "KubeSphere, Kubernetes, Docker, Jobs" +description: "Learn basic concepts of Jobs and how to create Jobs on KubeSphere." linkTitle: "Jobs" weight: 10250 @@ -15,7 +15,7 @@ The following example demonstrates specific steps of creating a Job (computing ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a Job @@ -23,8 +23,6 @@ You need to create a workspace, a project and an account (`project-regular`). Th Log in to the console as `project-regular`. Go to **Jobs** under **Application Workloads** and click **Create**. -![create-job](/images/docs/project-user-guide/application-workloads/jobs/click-create.png) - ### Step 2: Enter basic information Enter the basic information. Refer to the image below as an example. @@ -33,34 +31,26 @@ Enter the basic information. Refer to the image below as an example. 
- **Alias**: The alias name of the Job, making resources easier to identify. - **Description**: The description of the Job, which gives a brief introduction of the Job. -![job-create-basic-info](/images/docs/project-user-guide/application-workloads/jobs/basic-info.png) +### Step 3: Strategy settings (optional) -### Step 3: Job settings (optional) - -You can set the values in this step as below or click **Next** to use the default values. Refer to the table below for detailed explanations of each field. - -![job-create-job-settings](/images/docs/project-user-guide/application-workloads/jobs/job-settings.png) +You can set the values in this step or click **Next** to use the default values. Refer to the table below for detailed explanations of each field. | Name | Definition | Description | | ----------------------- | ---------------------------- | ------------------------------------------------------------ | -| Back off Limit | `spec.backoffLimit` | It specifies the number of retries before this Job is marked failed. It defaults to 6. | -| Completions | `spec.completions` | It specifies the desired number of successfully finished Pods the Job should be run with. Setting it to nil means that the success of any Pod signals the success of all Pods, and allows parallelism to have any positive value. Setting it to 1 means that parallelism is limited to 1 and the success of that Pod signals the success of the Job. For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). | -| Parallelism | `spec.parallelism` | It specifies the maximum desired number of Pods the Job should run at any given time. The actual number of Pods running in a steady state will be less than this number when the work left to do is less than max parallelism ((`.spec.completions - .status.successful`) < `.spec.parallelism`). For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). 
| -| Active Deadline Seconds | `spec.activeDeadlineSeconds` | It specifies the duration in seconds relative to the startTime that the Job may be active before the system tries to terminate it; the value must be a positive integer. | +| Maximum Retries | `spec.backoffLimit` | It specifies the maximum number of retries before this Job is marked as failed. It defaults to 6. | +| Complete Pods | `spec.completions` | It specifies the desired number of successfully finished Pods the Job should be run with. Setting it to nil means that the success of any Pod signals the success of all Pods, and allows parallelism to have any positive value. Setting it to 1 means that parallelism is limited to 1 and the success of that Pod signals the success of the Job. For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). | +| Parallel Pods | `spec.parallelism` | It specifies the maximum desired number of Pods the Job should run at any given time. The actual number of Pods running in a steady state will be less than this number when the work left to do is less than max parallelism ((`.spec.completions - .status.successful`) < `.spec.parallelism`). For more information, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). | +| Maximum Duration (s) | `spec.activeDeadlineSeconds` | It specifies the duration in seconds relative to the startTime that the Job may be active before the system tries to terminate it; the value must be a positive integer. | -### Step 4: Set an image +### Step 4: Set a Pod -1. Select **Never** for **Restart Policy**. You can only specify **Never** or **OnFailure** for **Restart Policy** when the Job is not completed: +1. Select **Re-create Pod** for **Restart Policy**. 
You can only specify **Re-create Pod** or **Restart container** for **Restart Policy** when the Job is not completed: - - If **Restart Policy** is set to **Never**, the Job creates a new Pod when the Pod fails, and the failed Pod does not disappear. + - If **Restart Policy** is set to **Re-create Pod**, the Job creates a new Pod when the Pod fails, and the failed Pod does not disappear. - - If **Restart Policy** is set to **OnFailure**, the Job will internally restart the container when the Pod fails, instead of creating a new Pod. + - If **Restart Policy** is set to **Restart container**, the Job will internally restart the container when the Pod fails, instead of creating a new Pod. - ![job-container-settings](/images/docs/project-user-guide/application-workloads/jobs/restart-policy.png) - -2. Click **Add Container Image** which directs you to the **Add Container** page. Enter `perl` in the image search bar and press **Enter**. - - ![add-container-image-job](/images/docs/project-user-guide/application-workloads/jobs/set-image.png) +2. Click **Add Container** which directs you to the **Add Container** page. Enter `perl` in the image search box and press **Enter**. 3. On the same page, scroll down to **Start Command**. Enter the following commands in the box which computes pi to 2000 places then prints it. Click **√** in the lower-right corner and select **Next** to continue. @@ -68,13 +58,11 @@ You can set the values in this step as below or click **Next** to use the defaul perl,-Mbignum=bpi,-wle,print bpi(2000) ``` - ![start-command-job](/images/docs/project-user-guide/application-workloads/jobs/start-command.png) - - {{< notice note >}}For more information about setting images, see [Container Image Settings](../container-image-settings/).{{}} + {{< notice note >}}For more information about setting images, see [Pod Settings](../container-image-settings/).{{}} ### Step 5: Inspect the Job manifest (optional) -1. 
Enable **Edit Mode** in the upper-right corner which displays the manifest file of the Job. You can see all the values are set based on what you have specified in the previous steps. +1. Enable **Edit YAML** in the upper-right corner which displays the manifest file of the Job. You can see all the values are set based on what you have specified in the previous steps. ```yaml apiVersion: batch/v1 @@ -113,36 +101,28 @@ You can set the values in this step as below or click **Next** to use the defaul activeDeadlineSeconds: 300 ``` -2. You can make adjustments in the manifest directly and click **Create** or disable the **Edit Mode** and get back to the **Create Job** page. +2. You can make adjustments in the manifest directly and click **Create** or disable the **Edit YAML** and get back to the **Create** page. - {{< notice note >}}You can skip **Mount Volumes** and **Advanced Settings** for this tutorial. For more information, see [Mount volumes](../deployments/#step-4-mount-volumes) and [Configure advanced settings](../deployments/#step-5-configure-advanced-settings).{{}} + {{< notice note >}}You can skip **Volume Settings** and **Advanced Settings** for this tutorial. For more information, see [Mount volumes](../deployments/#step-4-mount-volumes) and [Configure advanced settings](../deployments/#step-5-configure-advanced-settings).{{}} ### Step 6: Check the result 1. In the final step of **Advanced Settings**, click **Create** to finish. A new item will be added to the Job list if the creation is successful. - ![job-list-new](/images/docs/project-user-guide/application-workloads/jobs/job-in-list.png) - -2. Click this Job and go to **Execution Records** where you can see the information of each execution record. There are four completed Pods since **Completions** was set to `4` in Step 3. - - ![execution-record](/images/docs/project-user-guide/application-workloads/jobs/exe-records.png) +2. 
Click this Job and go to **Job Records** where you can see the information of each execution record. There are four completed Pods since **Complete Pods** was set to `4` in Step 3.

{{< notice tip >}}

-You can rerun the Job if it fails, the reason of which displays under **Messages**.
+You can rerun the Job if it fails and the reason for failure is displayed under **Message**.

{{}}

-3. In **Resource Status**, you can inspect the Pod status. Two Pods were created each time as **Parallelism** was set to 2. Click on the right and check the container log as shown below, which displays the expected calculation result.
-
-    ![container-log](/images/docs/project-user-guide/application-workloads/jobs/resource-status.png)
-
-    ![container-log-check](/images/docs/project-user-guide/application-workloads/jobs/log.png)
+3. In **Resource Status**, you can inspect the Pod status. Two Pods were created each time as **Parallel Pods** was set to 2. Click on the right and click to check the container log, which displays the expected calculation result.

{{< notice tip >}}

- In **Resource Status**, the Pod list provides the Pod's detailed information (for example, creation time, node, Pod IP and monitoring data).
- You can view the container information by clicking the Pod.
- Click the container log icon to view the output logs of the container.
-- You can view the Pod detail page by clicking the Pod name.
+- You can view the Pod details page by clicking the Pod name.

{{}}

@@ -150,20 +130,16 @@ You can rerun the Job if it fails, the reason of which displays under **Messages

### Operations

-On the Job detail page, you can manage the Job after it is created.
+On the Job details page, you can manage the Job after it is created.

- **Edit Information**: Edit the basic information except `Name` of the Job.
-- **Rerun Job**: Rerun the Job, the Pod will restart, and a new execution record will be generated.
+- **Rerun**: Rerun the Job, the Pod will restart, and a new execution record will be generated. - **View YAML**: View the Job's specification in YAML format. - **Delete**: Delete the Job and return to the Job list page. -![job-operation](/images/docs/project-user-guide/application-workloads/jobs/modify-job.png) - ### Execution records -1. Click the **Execution Records** tab to view the execution records of the Job. - - ![execution-records](/images/docs/project-user-guide/application-workloads/jobs/execution-records.png) +1. Click the **Job Records** tab to view the execution records of the Job. 2. Click to refresh the execution records. @@ -171,24 +147,16 @@ On the Job detail page, you can manage the Job after it is created. 1. Click the **Resource Status** tab to view the Pods of the Job. - ![resource-status](/images/docs/project-user-guide/application-workloads/jobs/res-status.png) - 2. Click to refresh the Pod information, and click / to display/hide the containers in each Pod. ### Metadata Click the **Metadata** tab to view the labels and annotations of the Job. -![metadata](/images/docs/project-user-guide/application-workloads/jobs/metadata.png) - ### Environment variables Click the **Environment Variables** tab to view the environment variables of the Job. -![env-variable](/images/docs/project-user-guide/application-workloads/jobs/env-variables.png) - ### Events Click the **Events** tab to view the events of the Job. 
-
-![events](/images/docs/project-user-guide/application-workloads/jobs/events.png)
\ No newline at end of file
diff --git a/content/en/docs/project-user-guide/application-workloads/routes.md b/content/en/docs/project-user-guide/application-workloads/routes.md
index a0be0cf08..7addb2bb5 100644
--- a/content/en/docs/project-user-guide/application-workloads/routes.md
+++ b/content/en/docs/project-user-guide/application-workloads/routes.md
@@ -11,7 +11,7 @@ A Route on KubeSphere is the same as an [Ingress](https://kubernetes.io/docs/con

## Prerequisites

-- You need to create a workspace, a project and two accounts (for example, `project-admin` and `project-regular`). In the project, the role of `project-admin` must be `admin` and that of `project-regular` must be `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](/docs/quick-start/create-workspace-and-project/).
+- You need to create a workspace, a project and two users (for example, `project-admin` and `project-regular`). In the project, the role of `project-admin` must be `admin` and that of `project-regular` must be `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](/docs/quick-start/create-workspace-and-project/).
- If the Route is to be accessed in HTTPS mode, you need to [create a Secret](/docs/project-user-guide/configuration/secrets/) that contains the `tls.crt` (TLS certificate) and `tls.key` (TLS private key) keys used for encryption.
- You need to [create at least one Service](/docs/project-user-guide/application-workloads/services/). This document uses a demo Service as an example, which returns the Pod name to external requests.

@@ -19,28 +19,16 @@ A Route on KubeSphere is the same as an [Ingress](https://kubernetes.io/docs/con

1. Log in to the KubeSphere web console as `project-admin` and go to your project.

-2. Choose **Advanced Settings** in **Project Settings** on the left navigation bar and click **Set Gateway** on the right.
+2. 
Select **Gateway Settings** in **Project Settings** on the left navigation bar and click **Enable Gateway** on the right. + +3. In the displayed dialog box, set **Access Mode** to **NodePort** or **LoadBalancer**, and click **OK**. {{< notice note >}} - If the access method has been set, you can click **Edit** and choose **Edit Gateway** to change the access method. + If **Access Mode** is set to **LoadBalancer**, you may need to enable the load balancer plugin in your environment according to the plugin user guide. {{}} - ![set-gateway](/images/docs/project-user-guide/application-workloads/routes/set-gateway.png) - -3. In the displayed **Set Gateway** dialog box, set **Access Method** to **NodePort** or **LoadBalancer**, and click **Save**. - - {{< notice note >}} - - If **Access Method** is set to **LoadBalancer**, you may need to enable the load balancer plugin in your environment according to the plugin user guide. - - {{}} - - ![access-method-nodeport](/images/docs/project-user-guide/application-workloads/routes/access-method-nodeport.png) - - ![access-method-loadbalancer](/images/docs/project-user-guide/application-workloads/routes/access-method-loadbalancer.png) - ## Create a Route ### Step 1: Configure basic information @@ -49,34 +37,26 @@ A Route on KubeSphere is the same as an [Ingress](https://kubernetes.io/docs/con 2. Choose **Routes** in **Application Workloads** on the left navigation bar and click **Create** on the right. - ![create-route](/images/docs/project-user-guide/application-workloads/routes/create-route.png) - -3. On the **Basic Info** tab, configure the basic information about the Route and click **Next**. +3. On the **Basic Information** tab, configure the basic information about the Route and click **Next**. * **Name**: Name of the Route, which is used as a unique identifier. * **Alias**: Alias of the Route. * **Description**: Description of the Route. 
- ![basic-info](/images/docs/project-user-guide/application-workloads/routes/basic-info.png) +### Step 2: Configure routing rules -### Step 2: Configure Route rules +1. On the **Routing Rules** tab, click **Add Routing Rule**. -1. On the **Route Rules** tab, click **Add Route Rule**. - -2. Select a mode, configure Route rules, click **√**, and click **Next**. +2. Select a mode, configure routing rules, click **√**, and click **Next**. * **Auto Generate**: KubeSphere automatically generates a domain name in the `...nip.io` format and the domain name is automatically resolved by [nip.io](https://nip.io/) into the gateway address. This mode supports only HTTP. * **Paths**: Map each Service to a path. You can click **Add Path** to add multiple paths. - ![auto-generate](/images/docs/project-user-guide/application-workloads/routes/auto-generate.png) - * **Specify Domain**: A user-defined domain name is used. This mode supports both HTTP and HTTPS. - * **HostName**: Set a domain name for the Route. + * **Domain Name**: Set a domain name for the Route. * **Protocol**: Select `http` or `https`. If `https` is selected, you need to select a Secret that contains the `tls.crt` (TLS certificate) and `tls.key` (TLS private key) keys used for encryption. * **Paths**: Map each Service to a path. You can click **Add Path** to add multiple paths. - - ![specify-domain](/images/docs/project-user-guide/application-workloads/routes/specify-domain.png) ### (Optional) Step 3: Configure advanced settings @@ -90,55 +70,43 @@ A Route on KubeSphere is the same as an [Ingress](https://kubernetes.io/docs/con {{}} - ![add-metadata](/images/docs/project-user-guide/application-workloads/routes/add-metadata.png) - ### Step 4: Obtain the domain name, Service path, and gateway address -1. Choose **Routes** in **Application Workloads** on the left navigation bar and click the name of the Route on the right. +1. 
Select **Routes** in **Application Workloads** on the left navigation bar and click the name of the Route on the right.

-    ![route-list](/images/docs/project-user-guide/application-workloads/routes/route-list.png)
+2. Obtain the domain name, Service path, and gateway address in the **Rules** area.

-2. Obtain the domain name and Service path in the **Rules** area and the gateway address in the **Details** area.
+    * If the [Route access mode](#configure-the-route-access-method) is set to NodePort, the IP address of a Kubernetes cluster node is used as the gateway address and the NodePort is displayed after the domain name.

-    * If the [Route access method](#configure-the-route-access-method) is set to NodePort, the IP address of a Kubernetes cluster node is used as the gateway address and the NodePort is displayed after the domain name.
-
-        ![obtain-address-nodeport](/images/docs/project-user-guide/application-workloads/routes/obtain-address-nodeport.png)
-
-    * If the [Route access method](#configure-the-route-access-method) is set to LoadBalancer, the gateway address is assigned by the load balancer plugin.
-
-        ![obtain-address-loadbalancer](/images/docs/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png)
+    * If the [Route access mode](#configure-the-route-access-method) is set to LoadBalancer, the gateway address is assigned by the load balancer plugin.

## Configure Domain Name Resolution

-If **Auto Generate** is selected in the [route rule configuration](#step-2-configure-route-rules), you do not need to configure domain name resolution and the domain name is automatically resolved by [nip.io](https://nip.io/) into the gateway address.
+If **Auto Generate** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you do not need to configure domain name resolution and the domain name is automatically resolved by [nip.io](https://nip.io/) into the gateway address.
-If **Specify Domain** is selected in the [route rule configuration](#step-2-configure-route-rules), you need to configure domain name resolution on your DNS server or add ` ` to the `etc/hosts` file of your client machine.
+If **Specify Domain** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you need to configure domain name resolution on your DNS server or add ` ` to the `etc/hosts` file of your client machine.

## Access the Route

### NodePort access mode

1. Log in to a client machine connected to the Route gateway address.

2. Use the `:/` address to access the backend Service of the Route.

-    ![access-route-nodeport](/images/docs/project-user-guide/application-workloads/routes/access-route-nodeport.png)
-
### LoadBalancer access method

1. Log in to a client machine connected to the Route gateway address.

2. Use the `/` address to access the backend Service of the Route.

-    ![access-route-loadbalancer](/images/docs/project-user-guide/application-workloads/routes/access-route-loadbalancer.png)
-
{{< notice note >}}

If you need to access the Route from outside your private network by using either NodePort or LoadBalancer, depending on your network environment:

* You may need to configure traffic forwarding and firewall policies in your infrastructure environment so that the gateway address and port number of the Route can be accessed.
-* If **Auto Generate** is selected in the [route rule configuration](#step-2-configure-route-rules), you may need to manually [edit the Route rules](#edit-the-route) to change the gateway address in the Route domain name to the external IP address of your private network.
-* If **Specify Domain** is selected in the [route rule configuration](#step-2-configure-route-rules), you may need to change the configuration on your DNS server or in the `etc/hosts` file of your client machine so that the domain name can be resolved into the external IP address of your private network.
+* If **Auto Generate** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you may need to manually [edit the routing rules](#edit-the-route) to change the gateway address in the Route domain name to the external IP address of your private network. +* If **Specify Domain** is selected in the [routing rule configuration](#step-2-configure-routing-rules), you may need to change the configuration on your DNS server or in the `etc/hosts` file of your client machine so that the domain name can be resolved into the external IP address of your private network. {{}} @@ -148,32 +116,22 @@ If you need to access the Route from outside your private network by using eithe 1. Choose **Routes** in **Application Workloads** on the left navigation bar and click the name of the Route on the right. - ![route-list](/images/docs/project-user-guide/application-workloads/routes/route-list.png) - -2. Click **Edit Info**, or click **More** and choose an operation from the drop-down list. - * **Edit Info**: Edit the basic information of the Route. The Route name cannot be edited. +2. Click **Edit Information**, or click **More** and choose an operation from the drop-down menu. * **Edit YAML**: Edit the YAML configuration file of the Route. - * **Edit Rules**: Edit the Route rules. + * **Edit Routing Rules**: Edit the Route rules. * **Edit Annotations**: Edit the Route annotations. For more information, see the [official Nginx Ingress controller document](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). * **Delete**: Delete the Route and return to the Route list page. - ![edit-route](/images/docs/project-user-guide/application-workloads/routes/edit-route.png) - ### Resource status Click the **Resource Status** tab to view the Route rules. 
-![resource-status](/images/docs/project-user-guide/application-workloads/routes/resource-status.png) - ### Metadata Click the **Metadata** tab to view the labels and annotations of the Route. -![metadata](/images/docs/project-user-guide/application-workloads/routes/metadata.png) - ### Events Click the **Events** tab to view the events of the Route. -![events](/images/docs/project-user-guide/application-workloads/routes/events.png) diff --git a/content/en/docs/project-user-guide/application-workloads/services.md b/content/en/docs/project-user-guide/application-workloads/services.md index 7d84755af..efd4d254d 100644 --- a/content/en/docs/project-user-guide/application-workloads/services.md +++ b/content/en/docs/project-user-guide/application-workloads/services.md @@ -16,7 +16,7 @@ For more information, see the [official documentation of Kubernetes](https://kub - **Virtual IP**: It is based on the unique IP generated by the cluster. A service can be accessed through this IP inside the cluster. This type is suitable for most services. Alternatively, a service can also be accessed through a NodePort and LoadBalancer outside the cluster. -- **Headless**: The cluster does not generate an IP address for the service, and the service is directly accessed through the backend Pod IP of the service within the cluster. This type is suitable for backend heterogeneous services, such as services that need to distinguish between master and slave. +- **Headless**: The cluster does not generate an IP address for the service, and the service is directly accessed through the backend Pod IP of the service within the cluster. This type is suitable for backend heterogeneous services, such as services that need to distinguish between master and agent. {{< notice tip>}} @@ -26,13 +26,11 @@ In KubeSphere, stateful and stateless Services are created with a virtual IP by ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). 
The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Service Types -As shown in the image below, KubeSphere provides three basic methods to create a Service: **Stateless Service**, **Stateful Service**, and **External Service**. Besides, you can also customize a Service through **Specify Workloads** and **Edit by YAML** under **Custom Creation**. - -![create_service_type](/images/docs/project-user-guide/application-workloads/services/service-types.png) +KubeSphere provides three basic methods to create a Service: **Stateless Service**, **Stateful Service**, and **External Service**. Besides, you can also customize a Service through **Specify Workload** and **Edit YAML** under **Customize Service**. - **Stateless Service** @@ -44,13 +42,13 @@ As shown in the image below, KubeSphere provides three basic methods to create a - **External Service** - Different from stateless and stateful Services, an ExternalName Service maps a Service to a DNS name instead of a selector. You need to specify these Services in the **ExternalName** field, indicated by `externalName` in the YAML file. + Different from stateless and stateful Services, an External Service maps a Service to a DNS name instead of a selector. You need to specify these Services in the **External Service Address** field, indicated by `externalName` in the YAML file. -- **Specify Workloads** +- **Specify Workload** Create a Service with existing Pods. -- **Edit by YAML** +- **Edit YAML** Create a Service directly with YAML. You can upload and download YAML configuration files to and from the console. 
@@ -66,12 +64,8 @@ The value of `annotations:kubesphere.io/serviceType` keywords can be defined as: 1. Go to **Services** under **Application Workloads** of a project and click **Create**. - ![services_lists](/images/docs/project-user-guide/application-workloads/services/click-create.png) - 2. Click **Stateless Service**. - ![stateless_form](/images/docs/project-user-guide/application-workloads/services/stateless-service.png) - {{< notice note >}} The steps of creating a stateful Service and a stateless Service are basically the same. This example only goes through the process of creating a stateless Service for demonstration purpose. @@ -82,15 +76,13 @@ The steps of creating a stateful Service and a stateless Service are basically t 1. In the displayed dialog box, you can see the field **Version** prepopulated with `v1`. You need to define a name for the Service, such as `demo-stateless`. When you finish, click **Next** to continue. - ![stateless_form_1](/images/docs/project-user-guide/application-workloads/services/enter-name.png) - - **Name**: The name of the Service and Deployment, which is also the unique identifier. - **Alias**: The alias name of the Service, making resources easier to identify. - **Version**: It can only contain lowercase letters and numbers. The maximum length of characters is set to 16. {{< notice tip >}} -The value of **Name** is used in both configurations, one for Deployment and the other for Service. You can see the manifest file of the Deployment and the Service by enabling **Edit Mode** in the top-right corner. Below is an example file for your reference. +The value of **Name** is used in both configurations, one for Deployment and the other for Service. You can see the manifest file of the Deployment and the Service by enabling **Edit YAML** in the upper-right corner. Below is an example file for your reference. 
{{}} @@ -125,15 +117,13 @@ The value of **Name** is used in both configurations, one for Deployment and the app: xxx ``` -### Step 3: Set an image +### Step 3: Set a Pod -To add a container image for the Service, see [Set an image](../deployments/#step-3-set-an-image) for details. - -![stateless_form_2.png](/images/docs/project-user-guide/application-workloads/services/set-image.png) +To add a container image for the Service, see [Set a Pod](../deployments/#step-3-set-a-pod) for details. {{< notice tip >}} -For more information about explanations of dashboard properties, see [Container Image Settings](../container-image-settings/) directly. +For more information about explanations of dashboard properties, see [Pod Settings](../container-image-settings/) directly. {{}} @@ -141,15 +131,11 @@ For more information about explanations of dashboard properties, see [Container To mount a volume for the Service, see [Mount Volumes](../deployments/#step-4-mount-volumes) for details. -![stateless_form_3](/images/docs/project-user-guide/application-workloads/services/set-volumes.png) - ### Step 5: Configure advanced settings -You can set a policy for node scheduling and add metadata which is the same as explained in [Deployments](../deployments/#step-5-configure-advanced-settings). For a Service, you can see two additional options available, **Internet Access** and **Enable Sticky Session**. +You can set a policy for node scheduling and add metadata which is the same as explained in [Deployments](../deployments/#step-5-configure-advanced-settings). For a Service, you can see two additional options available, **External Access** and **Sticky Session**. -![stateless_form_4](/images/docs/project-user-guide/application-workloads/services/advanced-settings.png) - -- Internet Access +- External Access You can expose a Service externally through two methods, NodePort and LoadBalancer. @@ -163,7 +149,7 @@ This value is specified by `.spec.type`. 
If you select **LoadBalancer**, you nee {{}} -- Enable Sticky Session +- Sticky Session You may want to route all traffic sent from a single client session to the same instance of an app which runs across multiple replicas. This makes better use of caches as it reduces latency. This behavior of load balancing is called Sticky Sessions. @@ -171,46 +157,34 @@ This value is specified by `.spec.type`. If you select **LoadBalancer**, you nee ## Check Service Details -### Detail page +### Details page 1. After a Service is created, you can click on the right to further edit it, such as its metadata (excluding **Name**), YAML, port, and Internet access. - ![stateless_finish](/images/docs/project-user-guide/application-workloads/services/click-dots.png) - - - **Edit**: View and edit the basic information. + - **Edit Information**: View and edit the basic information. - **Edit YAML**: View, upload, download, or update the YAML file. - **Edit Service**: View the access type and set selectors and ports. - - **Edit Internet Access**: Edit the service Internet access method. + - **Edit External Access**: Edit external access method for the Service. - **Delete**: When you delete a Service, associated resources will be displayed. If you check them, they will be deleted together with the Service. -2. Click the name of the Service and you can go to its detail page. - - ![stateless_finish](/images/docs/project-user-guide/application-workloads/services/detail-page.png) +2. Click the name of the Service and you can go to its details page. - Click **More** to expand the drop-down menu which is the same as the one in the Service list. - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). - You can view the container information by clicking a Pod item. - Click the container log icon to view output logs of the container. - - You can view the Pod detail page by clicking the Pod name. 
+ - You can view the Pod details page by clicking the Pod name. ### Resource status -1. Click the **Resource Status** tab to view information about the Service ports, Workloads, and Pods. - - ![services](/images/docs/project-user-guide/application-workloads/services/resource-status.png) +1. Click the **Resource Status** tab to view information about the Service ports, workloads, and Pods. 2. In the **Pods** area, click to refresh the Pod information, and click / to display/hide the containers in each Pod. - ![services](/images/docs/project-user-guide/application-workloads/services/see-pod.png) - ### Metadata Click the **Metadata** tab to view the labels and annotations of the Service. -![services](/images/docs/project-user-guide/application-workloads/services/metadata.png) - ### Events -Click the **Events** tab to view the events of the Service. - -![services](/images/docs/project-user-guide/application-workloads/services/events.png) \ No newline at end of file +Click the **Events** tab to view the events of the Service. \ No newline at end of file diff --git a/content/en/docs/project-user-guide/application-workloads/statefulsets.md b/content/en/docs/project-user-guide/application-workloads/statefulsets.md index f270ca0a4..833c7f925 100644 --- a/content/en/docs/project-user-guide/application-workloads/statefulsets.md +++ b/content/en/docs/project-user-guide/application-workloads/statefulsets.md @@ -1,12 +1,12 @@ --- -title: "StatefulSets" -keywords: 'KubeSphere, Kubernetes, StatefulSets, dashboard, service' -description: 'Learn basic concepts of StatefulSets and how to create StatefulSets in KubeSphere.' +title: "Kubernetes StatefulSet in KubeSphere" +keywords: 'KubeSphere, Kubernetes, StatefulSets, Dashboard, Service' +description: 'Learn basic concepts of StatefulSets and how to create StatefulSets on KubeSphere.' linkTitle: "StatefulSets" weight: 10220 --- -As a workload API object, a StatefulSet is used to manage stateful applications. 
It is responsible for the deploying, scaling of a set of Pods, and guarantees the ordering and uniqueness of these Pods. +As a workload API object, a Kubernetes StatefulSet is used to manage stateful applications. It is responsible for the deployment and scaling of a set of Pods, and guarantees the ordering and uniqueness of these Pods. Like a Deployment, a StatefulSet manages Pods that are based on an identical container specification. Unlike a Deployment, a StatefulSet maintains a sticky identity for each of their Pods. These Pods are created from the same specification, but are not interchangeable: each has a persistent identifier that it maintains across any rescheduling. @@ -23,64 +23,52 @@ For more information, see the [official documentation of Kubernetes](https://kub ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). -## Create a StatefulSet +## Create a Kubernetes StatefulSet In KubeSphere, a **Headless** service is also created when you create a StatefulSet. You can find the headless service in [Services](../services/) under **Application Workloads** in a project. ### Step 1: Open the dashboard -Log in to the console as `project-regular`. Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the tab **StatefulSets**. - -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/click-create.png) +Log in to the console as `project-regular`. 
Go to **Application Workloads** of a project, select **Workloads**, and click **Create** under the **StatefulSets** tab. ### Step 2: Enter basic information Specify a name for the StatefulSet (for example, `demo-stateful`) and click **Next** to continue. -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/enter-name.png) - -### Step 3: Set an image +### Step 3: Set a Pod 1. Before you set an image, define the number of replicated Pods in **Pod Replicas** by clicking or , which is indicated by the `.spec.replicas` field in the manifest file. {{< notice tip >}} -You can see the StatefulSet manifest file in YAML format by enabling **Edit Mode** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a StatefulSet. Alternatively, you can follow the steps below to create a StatefulSet via the dashboard. +You can see the StatefulSet manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a StatefulSet. Alternatively, you can follow the steps below to create a StatefulSet via the dashboard. {{}} - - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/set-replicas.png) -2. Click **Add Container Image**. +2. Click **Add Container**. - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/click-add-image.png) - -3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search bar and press **Enter**. - - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/enter-nginx.png) +3. Enter an image name from public Docker Hub or from a [private repository](../../configuration/image-registry/) you specified. For example, enter `nginx` in the search box and press **Enter**. 
{{< notice note >}} -- Remember to press **Enter** on your keyboard after you enter an image name in the search bar. -- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configurations**. +- Remember to press **Enter** on your keyboard after you enter an image name in the search box. +- If you want to use your private image repository, you should [create an Image Registry Secret](../../configuration/image-registry/) first in **Secrets** under **Configuration**. {{}} 4. Set requests and limits for CPU and memory resources based on your needs. For more information, see [Resource Request and Resource Limit in Container Image Settings](../container-image-settings/#add-container-image). - ![statefulset-request-limit](/images/docs/project-user-guide/application-workloads/statefulsets/set-requests-limits.png) +5. Click **Use Default Ports** for **Port Settings** or you can customize **Protocol**, **Name** and **Container Port**. -5. Click **Use Default Ports** for **Service Settings** or you can customize **Protocol**, **Name** and **Container Port**. +6. Select a policy for image pulling from the drop-down list. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). -6. Select a policy for image pulling from the drop-down menu. For more information, see [Image Pull Policy in Container Image Settings](../container-image-settings/#add-container-image). +7. For other settings (**Health Check**, **Start Command**, **Environment Variables**, **Container Security Context** and **Synchronize Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Pod Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the lower-right corner to continue. -7. 
For other settings (**Health Checker**, **Start Command**, **Environment Variables**, **Container Security Context** and **Sync Host Timezone**), you can configure them on the dashboard as well. For more information, see detailed explanations of these properties in [Container Image Settings](../container-image-settings/#add-container-image). When you finish, click **√** in the bottom-right corner to continue. +8. Select an update strategy from the drop-down menu. It is recommended you choose **Rolling Update**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). -8. Select an update strategy from the drop-down menu. It is recommended you choose **RollingUpdate**. For more information, see [Update Strategy](../container-image-settings/#update-strategy). - -9. Select a deployment mode. For more information, see [Deployment Mode](../container-image-settings/#deployment-mode). +9. Select a Pod scheduling rule. For more information, see [Pod Scheduling Rules](../container-image-settings/#pod-scheduling-rules). 10. Click **Next** to continue when you finish setting the container image. @@ -88,63 +76,49 @@ You can see the StatefulSet manifest file in YAML format by enabling **Edit Mode StatefulSets can use the volume template, but you must create it in **Storage** in advance. For more information about volumes, visit [Volumes](../../storage/volumes/#mount-a-volume). When you finish, click **Next** to continue. -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/mount-volume.png) - ### Step 5: Configure advanced settings -You can set a policy for node scheduling and add metadata in this section. When you finish, click **Create** to complete the whole process of creating a StatefulSet. +You can set a policy for node scheduling and add StatefulSet metadata in this section. When you finish, click **Create** to complete the whole process of creating a StatefulSet. 
-![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/advanced-settings.png) +- **Select Nodes** -- **Set Node Scheduling Policy** - - You can allow Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. + Assign Pod replicas to run on specified nodes. It is specified in the field `nodeSelector`. - **Add Metadata** Additional metadata settings for resources such as **Labels** and **Annotations**. -## Check StatefulSet Details +## Check Kubernetes StatefulSet Details -### Detail page +### Details page -1. After a StatefulSet is created, it will be displayed in the list as below. You can click on the right to select options from the menu to modify your StatefulSet. +1. After a StatefulSet is created, it will be displayed in the list. You can click on the right to select options from the menu to modify your StatefulSet. - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/click-dots.png) - - - **Edit**: View and edit the basic information. - - **Edit YAMl**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy the StatefulSet. + - **Edit Information**: View and edit the basic information. + - **Edit YAML**: View, upload, download, or update the YAML file. + - **Re-create**: Re-create the StatefulSet. - **Delete**: Delete the StatefulSet. -2. Click the name of the StatefulSet and you can go to its detail page. - - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/detail-page.png) +2. Click the name of the StatefulSet and you can go to its details page. 3. Click **More** to display what operations about this StatefulSet you can do. - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/click-more.png) - - - **Revision Rollback**: Select the revision to roll back. + - **Roll Back**: Select the revision to roll back. 
- **Edit Service**: Set the port to expose the container image and the service port. - - **Edit Config Template**: Configure update strategies, containers and volumes. + - **Edit Settings**: Configure update strategies, containers and volumes. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Redeploy**: Redeploy this StatefulSet. + - **Re-create**: Re-create this StatefulSet. - **Delete**: Delete the StatefulSet, and return to the StatefulSet list page. 4. Click the **Resource Status** tab to view the port and Pod information of a StatefulSet. - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/resource-status.png) - - - **Replica Status**: Click or to increase or decrease the number of Pod replicas. - - **Pod detail** - - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/pod-detail.png) + - **Replica Status**: Click or to increase or decrease the number of Pod replicas. + - **Pods** - The Pod list provides detailed information of the Pod (status, node, Pod IP and resource usage). - You can view the container information by clicking a Pod item. - Click the container log icon to view output logs of the container. - - You can view the Pod detail page by clicking the Pod name. + - You can view the Pod details page by clicking the Pod name. ### Revision records @@ -154,17 +128,11 @@ After the resource template of workload is changed, a new log will be generated Click the **Metadata** tab to view the labels and annotations of the StatefulSet. -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/metadata.png) - ### Monitoring 1. Click the **Monitoring** tab to view the CPU usage, memory usage, outbound traffic, and inbound traffic of the StatefulSet. - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/monitoring.png) - -2. Click the drop-down menu in the upper-right corner to customize the time range and time interval. 
- - ![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/set-interval.png) +2. Click the drop-down menu in the upper-right corner to customize the time range and sampling interval. 3. Click / in the upper-right corner to start/stop automatic data refreshing. @@ -174,11 +142,7 @@ Click the **Metadata** tab to view the labels and annotations of the StatefulSet Click the **Environment Variables** tab to view the environment variables of the StatefulSet. -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/env-variables.png) - ### Events Click the **Events** tab to view the events of the StatefulSet. -![statefulsets](/images/docs/project-user-guide/application-workloads/statefulsets/events.png) - diff --git a/content/en/docs/project-user-guide/application/app-template.md b/content/en/docs/project-user-guide/application/app-template.md index a7e879a7d..30958f0bc 100644 --- a/content/en/docs/project-user-guide/application/app-template.md +++ b/content/en/docs/project-user-guide/application/app-template.md @@ -1,28 +1,24 @@ --- title: "App Templates" -keywords: 'Kubernetes, chart, Helm, KubeSphere, application, repository, template' +keywords: 'Kubernetes, Chart, Helm, KubeSphere, Application Template, Repository' description: 'Understand the concept of app templates and how they can help to deploy applications within enterprises.' linkTitle: "App Templates" weight: 10110 --- -An app template serves as a way for users to upload, deliver and manage apps. 
Generally, an app is composed of one or more Kubernetes workloads (for example, [Deployments](../../../project-user-guide/application-workloads/deployments/), [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/) and [DaemonSets](../../../project-user-guide/application-workloads/daemonsets/)) and [Services](../../../project-user-guide/application-workloads/services/) based on how it functions and communicates with the external environment. Apps that are uploaded as app templates are built based on a [Helm](https://helm.sh/) package. +An app template serves as a way for users to upload, deliver, and manage apps. Generally, an app is composed of one or more Kubernetes workloads (for example, [Deployments](../../../project-user-guide/application-workloads/deployments/), [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/) and [DaemonSets](../../../project-user-guide/application-workloads/daemonsets/)) and [Services](../../../project-user-guide/application-workloads/services/) based on how it functions and communicates with the external environment. Apps that are uploaded as app templates are built based on a [Helm](https://helm.sh/) package. ## How App Templates Work You can deliver Helm charts to the public repository of KubeSphere or import a private app repository to offer app templates. -The public repository is also known as the App Store in KubeSphere, accessible to every tenant in a workspace. After [uploading the Helm chart of an app](../../../workspace-administration/upload-helm-based-application/), you can deploy your app to test its functions and submit it for review. Ultimately, you have the option to release it the App Store after it is approved. For more information, see [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). 
- -![app-store](/images/docs/project-user-guide/applications/app-templates/app-store.png) +The public repository, also known as the App Store on KubeSphere, is accessible to every tenant in a workspace. After [uploading the Helm chart of an app](../../../workspace-administration/upload-helm-based-application/), you can deploy your app to test its functions and submit it for review. Ultimately, you have the option to release it to the App Store after it is approved. For more information, see [Application Lifecycle Management](../../../application-store/app-lifecycle-management/). For a private repository, only users with required permissions are allowed to [add private repositories](../../../workspace-administration/app-repository/import-helm-repository/) in a workspace. Generally, the private repository is built based on object storage services, such as MinIO. After imported to KubeSphere, these private repositories serve as application pools to provide app templates. -![private-app-repository](/images/docs/project-user-guide/applications/app-templates/private-app-repository.png) - {{< notice note >}} -[For individual apps that are uploaded as Helm charts](../../../workspace-administration/upload-helm-based-application/) to KubeSphere, they display in the App Store together with built-in apps after approved and released. Besides, when you select app templates from private app repositories, you can also see **From workspace** in the list, which stores these individual apps uploaded as Helm charts. +[For individual apps that are uploaded as Helm charts](../../../workspace-administration/upload-helm-based-application/) to KubeSphere, they are displayed in the App Store together with built-in apps after being approved and released. Besides, when you select app templates from private app repositories, you can also see **Current workspace** in the list, which stores these individual apps uploaded as Helm charts. 
{{}} diff --git a/content/en/docs/project-user-guide/application/compose-app.md b/content/en/docs/project-user-guide/application/compose-app.md index 2037dd63c..b2564950e 100644 --- a/content/en/docs/project-user-guide/application/compose-app.md +++ b/content/en/docs/project-user-guide/application/compose-app.md @@ -6,32 +6,32 @@ linkTitle: "Create a Microservices-based App" weight: 10140 --- -With each microservice handling a single part of the app's functionality, an app can be divided into different components. These components have their own responsibilities and limitations, independent from each other. In KubeSphere, this kind of app is called **Composing App**, which can be built through newly created Services or existing Services. +With each microservice handling a single part of the app's functionality, an app can be divided into different components. These components have their own responsibilities and limitations, independent from each other. In KubeSphere, this kind of app is called **Composed App**, which can be built through newly created Services or existing Services. This tutorial demonstrates how to create a microservices-based app Bookinfo, which is composed of four Services, and set a customized domain name to access the app. ## Prerequisites -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). 
- `project-admin` needs to [set the project gateway](../../../project-administration/project-gateway/) so that `project-regular` can define a domain name when creating the app. ## Create Microservices that Compose an App -1. Log in to the web console of KubeSphere and navigate to **Apps** in **Application Workloads** of your project. On the **Composing Apps** tab, click **Create Composing App**. +1. Log in to the web console of KubeSphere and navigate to **Apps** in **Application Workloads** of your project. On the **Composed Apps** tab, click **Create**. 2. Set a name for the app (for example, `bookinfo`) and click **Next**. -3. On the **Components** page, you need to create microservices that compose the app. Click **Add Service** and select **Stateless Service**. +3. On the **Services** page, you need to create microservices that compose the app. Click **Create Service** and select **Stateless Service**. 4. Set a name for the Service (e.g `productpage`) and click **Next**. {{< notice note >}} - You can create a Service on the dashboard directly or enable **Edit Mode** in the top-right corner to edit the YAML file. + You can create a Service on the dashboard directly or enable **Edit YAML** in the upper-right corner to edit the YAML file. {{}} -5. Click **Add Container Image** under **Container Image** and enter `kubesphere/examples-bookinfo-productpage-v1:1.13.0` in the search bar to use the Docker Hub image. +5. Click **Add Container** under **Containers** and enter `kubesphere/examples-bookinfo-productpage-v1:1.13.0` in the search box to use the Docker Hub image. {{< notice note >}} @@ -39,11 +39,11 @@ This tutorial demonstrates how to create a microservices-based app Bookinfo, whi {{}} -6. Click **Use Default Ports**. For more information about image settings, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/). Click **√** in the bottom-right corner and **Next** to continue. +6. 
Click **Use Default Ports**. For more information about image settings, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/). Click **√** in the lower-right corner and **Next** to continue. -7. On the **Mount Volumes** page, [add a volume](../../../project-user-guide/storage/volumes/) or click **Next** to continue. +7. On the **Volume Settings** page, [add a volume](../../../project-user-guide/storage/volumes/) or click **Next** to continue. -8. Click **Add** on the **Advanced Settings** page directly. +8. Click **Create** on the **Advanced Settings** page. 9. Similarly, add the other three microservices for the app. Here is the image information: @@ -55,13 +55,11 @@ This tutorial demonstrates how to create a microservices-based app Bookinfo, whi 10. When you finish adding microservices, click **Next**. -11. On the **Internet Access** page, click **Add Route Rule**. On the **Specify Domain** tab, set a domain name for your app (for example, `demo.bookinfo`) and select `http` in the **Protocol** field. For `Paths`, select the Service `productpage` and port `9080`. Click **OK** to continue. - - ![route](/images/docs/project-user-guide/applications/create-a-microservices-based-app/route.png) +11. On the **Route Settings** page, click **Add Routing Rule**. On the **Specify Domain** tab, set a domain name for your app (for example, `demo.bookinfo`) and select `HTTP` in the **Protocol** field. For `Paths`, select the Service `productpage` and port `9080`. Click **OK** to continue. {{< notice note >}} -The button **Add Route Rule** is not visible if the project gateway is not set. +The button **Add Routing Rule** is not visible if the project gateway is not set. {{}} @@ -84,13 +82,9 @@ The button **Add Route Rule** is not visible if the project gateway is not set. {{}} -2. In **Composing Apps**, click the app you just created. +2. In **Composed Apps**, click the app you just created. -3. 
In **Application Components**, click **Click to visit** to access the app. - - ![click-to-visit](/images/docs/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png) - - ![dashboard](/images/docs/project-user-guide/applications/create-a-microservices-based-app/dashboard.png) +3. In **Resource Status**, click **Access Service** under **Routes** to access the app. {{< notice note >}} @@ -100,7 +94,3 @@ The button **Add Route Rule** is not visible if the project gateway is not set. 4. Click **Normal user** and **Test user** respectively to see other **Services**. - ![review-page](/images/docs/project-user-guide/applications/create-a-microservices-based-app/review-page.png) - - - diff --git a/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md b/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md index b6d9d0493..f131f373c 100644 --- a/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md +++ b/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md @@ -13,70 +13,48 @@ This tutorial demonstrates how to quickly deploy [NGINX](https://www.nginx.com/) ## Prerequisites - You have enabled [OpenPitrix (App Store)](../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be a platform regular user invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user must be invited to the project and granted the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Hands-on Lab ### Step 1: Deploy NGINX from the App Store -1. 
Log in to the web console of KubeSphere as `project-regular` and click **App Store** in the top-left corner. +1. Log in to the web console of KubeSphere as `project-regular` and click **App Store** in the upper-left corner. {{< notice note >}} - You can also go to **Apps** under **Application Workloads** in your project, click **Deploy New App**, and select **From App Store** to go to the App Store. + You can also go to **Apps** under **Application Workloads** in your project, click **Create**, and select **From App Store** to go to the App Store. {{}} -2. Find NGINX and click **Deploy** on the **App Information** page. - - ![nginx-in-app-store](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png) - - ![deploy-nginx](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png) +2. Search for NGINX, click it, and click **Install** on the **App Information** page. Make sure you click **Agree** in the displayed **App Deploy Agreement** dialog box. 3. Set a name and select an app version. Make sure NGINX is deployed in `demo-project` and click **Next**. - ![confirm-deployment](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png) - -4. In **App Configurations**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Deploy**. - - ![edit-config-nginx](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png) - - ![manifest-file](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png) +4. In **App Settings**, specify the number of replicas to deploy for the app and enable Ingress based on your needs. When you finish, click **Install**. {{< notice note >}} - To specify more values for NGINX, use the toggle switch to see the app’s manifest in YAML format and edit its configurations. 
+ To specify more values for NGINX, use the toggle to see the app’s manifest in YAML format and edit its configurations. {{}} 5. Wait until NGINX is up and running. - ![nginx-running](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png) - ### Step 2: Access NGINX To access NGINX outside the cluster, you need to expose the app through a NodePort first. -1. Go to **Services** and click the service name of NGINX. +1. Go to **Services** in the project `demo-project` and click the service name of NGINX. - ![nginx-service](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png) - -2. On the Service detail page, click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-internet-access](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png) +2. On the Service details page, click **More** and select **Edit External Access** from the drop-down menu. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png) - -4. Under **Service Ports**, you can see the port is exposed. - - ![exposed-port](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png) +4. Under **Ports**, view the exposed port. 5. Access NGINX through `:`. - ![access-nginx](/images/docs/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png) - {{< notice note >}} You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed. 
diff --git a/content/en/docs/project-user-guide/application/deploy-app-from-template.md b/content/en/docs/project-user-guide/application/deploy-app-from-template.md index 0bdc3332f..a1831e5e5 100644 --- a/content/en/docs/project-user-guide/application/deploy-app-from-template.md +++ b/content/en/docs/project-user-guide/application/deploy-app-from-template.md @@ -13,7 +13,7 @@ This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) ## Prerequisites - You have enabled [OpenPitrix (App Store)](../../../pluggable-components/app-store/). -- You have completed the tutorial of [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). Namely, you must have a workspace, a project and two accounts (`ws-admin` and `project-regular`). `ws-admin` must be granted the role of `workspace-admin` in the workspace and `project-regular` must be granted the role of `operator` in the project. +- You have completed the tutorial of [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). Namely, you must have a workspace, a project and two users (`ws-admin` and `project-regular`). `ws-admin` must be granted the role of `workspace-admin` in the workspace and `project-regular` must be granted the role of `operator` in the project. ## Hands-on Lab @@ -21,15 +21,9 @@ This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) 1. Log in to the web console of KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. - ![add-app-repo](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png) +2. In the displayed dialog box, enter `test-repo` for the app repository name and `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/` for the repository URL. 
Click **Validate** to verify the URL, set **Synchronization Interval** based on your needs, and click **OK**. -2. In the dialog that appears, enter `test-repo` for the app repository name and `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/` for the repository URL. Click **Validate** to verify the URL and click **OK** to continue. - - ![input-repo-info](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png) - -3. Your repository appears in the list after successfully imported to KubeSphere. - - ![repository-list](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png) +3. Your repository is displayed in the list after being successfully imported to KubeSphere. {{< notice note >}} @@ -39,13 +33,9 @@ This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) ### Step 2: Deploy Grafana from app templates -1. Log out of KubeSphere and log back in as `project-regular`. In your project, choose **Apps** under **Application Workloads** and click **Deploy New App**. +1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Apps** under **Application Workloads** and click **Create**. - ![create-new-app](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png) - -2. Select **From App Templates** from the pop-up dialog. - - ![select-app-templates](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png) +2. Select **From App Template** in the displayed dialog box. **From App Store**: Choose built-in apps and apps uploaded individually as Helm charts. @@ -53,17 +43,13 @@ This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) 3. Select `test-repo` from the drop-down list, which is the private app repository just uploaded. 
- ![private-app-template](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png) - {{< notice note >}} - The option **From workspace** in the list represents the workspace app pool, which contains apps uploaded as Helm charts. They are also part of app templates. + The option **Current workspace** in the list represents the workspace app pool, which contains apps uploaded as Helm charts. They are also part of app templates. {{}} -4. Enter `Grafana` in the search bar to find the app, and then click it to deploy it. - - ![search-grafana](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png) +4. Enter `grafana` in the search box to search for the app, and then click it to deploy it. {{< notice note >}} @@ -71,17 +57,11 @@ This tutorial demonstrates how to quickly deploy [Grafana](https://grafana.com/) {{}} -5. You can view its app information and configuration files. Under **Versions**, select a version number from the list and click **Deploy**. +5. Its app information and configuration files are also displayed. Under **Version**, select a version number from the list and click **Install**. - ![deploy-grafana](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png) - -6. Set an app name and confirm the version and deployment location. Click **Next** to continue. - - ![confirm-info](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png) +6. Set an app name and confirm the version and deployment location. Click **Next**. -7. In **App Configurations**, you can manually edit the manifest file or click **Deploy** directly. - - ![app-config](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png) +7. In **App Settings**, manually edit the manifest file or click **Install** directly. 8. Wait for Grafana to be up and running. 
@@ -91,37 +71,19 @@ To access Grafana outside the cluster, you need to expose the app through a Node 1. Go to **Services** and click the service name of Grafana. - ![grafana-services](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png) - -2. Click **More** and select **Edit Internet Access** from the drop-down menu. - - ![edit-access](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png) +2. Click **More** and select **Edit External Access** from the drop-down menu. 3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/). - ![nodeport](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png) - -4. Under **Service Ports**, you can see the port is exposed. - - ![exposed-port](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png) +4. Under **Ports**, view the exposed port. ### Step 4: Access Grafana -1. To access the Grafana dashboard, you need the username and password. Navigate to **Secrets** and click the item that has the same name as the app name. +1. To access the Grafana dashboard, you need the username and password. Go to **Secrets** under **Configuration** and click the item that has the same name as the app name. - ![grafana-secret](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png) +2. On the details page, click the eye icon to view the username and password. -2. On the detail page, click the eye icon and you can see the username and password. - - ![secret-page](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png) - - ![click-eye-icon](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png) - -2. Access Grafana through `:`. 
- - ![grafana-UI](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png) - - ![home-page](/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png) +3. Access Grafana through `:`. {{< notice note >}} diff --git a/content/en/docs/project-user-guide/configuration/_index.md b/content/en/docs/project-user-guide/configuration/_index.md index e620484f1..f23595117 100644 --- a/content/en/docs/project-user-guide/configuration/_index.md +++ b/content/en/docs/project-user-guide/configuration/_index.md @@ -1,5 +1,5 @@ --- -linkTitle: "Configurations" +linkTitle: "Configuration" weight: 10400 _build: diff --git a/content/en/docs/project-user-guide/configuration/configmaps.md b/content/en/docs/project-user-guide/configuration/configmaps.md index 76ad5b37d..5c138defc 100644 --- a/content/en/docs/project-user-guide/configuration/configmaps.md +++ b/content/en/docs/project-user-guide/configuration/configmaps.md @@ -16,26 +16,24 @@ This tutorial demonstrates how to create a ConfigMap in KubeSphere. ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a ConfigMap -1. Log in to the console as `project-regular`. Go to **Configurations** of a project, choose **ConfigMaps** and click **Create**. +1. Log in to the console as `project-regular`. Go to **Configuration** of a project, select **ConfigMaps** and click **Create**. -2. 
In the dialog that appears, specify a name for the ConfigMap (for example, `demo-configmap`) and click **Next** to continue. +2. In the displayed dialog box, specify a name for the ConfigMap (for example, `demo-configmap`) and click **Next** to continue. {{< notice tip >}} -You can see the ConfigMap manifest file in YAML format by enabling **Edit Mode** in the top-right corner. KubeSphere allows you to edit the manifest file directly to create a ConfigMap. Alternatively, you can follow the steps below to create a ConfigMap via the dashboard. +You can see the ConfigMap manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a ConfigMap. Alternatively, you can follow the steps below to create a ConfigMap via the dashboard. {{}} -3. On the **ConfigMap Settings** tab, configure values by clicking **Add Data**. +3. On the **Data Settings** tab, configure values by clicking **Add Data**. 4. Enter a key-value pair. For example: - ![key-value](/images/docs/project-user-guide/configurations/configmaps/key-value.png) - {{< notice note >}} - key-value pairs displays under the field `data` in the manifest. @@ -44,27 +42,25 @@ You can see the ConfigMap manifest file in YAML format by enabling **Edit Mode** {{}} -5. Click **√** in the bottom-right corner to save it and click **Add Data** again if you want to add more key-value pairs. +5. Click **√** in the lower-right corner to save it and click **Add Data** again if you want to add more key-value pairs. 6. Click **Create** to generate the ConfigMap. ## View ConfigMap Details -1. After a ConfigMap is created, it displays on the **ConfigMaps** page. You can click on the right and select the operation below from the drop-down list. +1. After a ConfigMap is created, it is displayed on the **ConfigMaps** page. You can click on the right and select the operation below from the drop-down list. - - **Edit**: View and edit the basic information. 
+ - **Edit Information**: View and edit the basic information. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Modify Config**: Modify the key-value pair of the ConfigMap. + - **Edit Settings**: Modify the key-value pair of the ConfigMap. - **Delete**: Delete the ConfigMap. -2. Click the name of the ConfigMap to go to its detail page. Under the tab **Detail**, you can see all the key-value pairs you have added for the ConfigMap. - - ![detail-page](/images/docs/project-user-guide/configurations/configmaps/detail-page.png) +2. Click the name of the ConfigMap to go to its details page. Under the tab **Data**, you can see all the key-value pairs you have added for the ConfigMap. 3. Click **More** to display what operations about this ConfigMap you can do. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Modify Config**: Modify the key-value pair of the ConfigMap. + - **Edit Settings**: Modify the key-value pair of the ConfigMap. - **Delete**: Delete the ConfigMap, and return to the list page. 4. Click **Edit Information** to view and edit the basic information. @@ -72,6 +68,4 @@ You can see the ConfigMap manifest file in YAML format by enabling **Edit Mode** ## Use a ConfigMap -When you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/) or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/), you may need to add environment variables for containers. On the **Container Image** page, check **Environment Variables** and click **Use ConfigMap or Secret** to use a ConfigMap from the list. 
- -![use-configmap](/images/docs/project-user-guide/configurations/configmaps/use-configmap.jpg) \ No newline at end of file +When you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/) or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/), you may need to add environment variables for containers. On the **Add Container** page, check **Environment Variables** and click **Use ConfigMap or Secret** to use a ConfigMap from the list. diff --git a/content/en/docs/project-user-guide/configuration/image-registry.md b/content/en/docs/project-user-guide/configuration/image-registry.md index 6bb20dae8..1469cbf9c 100644 --- a/content/en/docs/project-user-guide/configuration/image-registry.md +++ b/content/en/docs/project-user-guide/configuration/image-registry.md @@ -1,18 +1,18 @@ --- title: "Image Registries" keywords: 'KubeSphere, Kubernetes, docker, Secrets' -description: 'Learn how to create an image registry in KubeSphere.' +description: 'Learn how to create an image registry on KubeSphere.' linkTitle: "Image Registries" weight: 10430 --- -A Docker image is a read-only template that can be used to deploy container services. Each image has a unique identifier (i.e. image name:tag). For example, an image can contain a complete package of an Ubuntu operating system environment with only Apache and a few applications installed. An image registry is used to store and distribute Docker images. +A Docker image is a read-only template that can be used to deploy container services. Each image has a unique identifier (for example, image name:tag). For example, an image can contain a complete package of an Ubuntu operating system environment with only Apache and a few applications installed. An image registry is used to store and distribute Docker images. This tutorial demonstrates how to create Secrets for different image registries. 
## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Create a Secret @@ -20,9 +20,7 @@ When you create workloads, [Services](../../../project-user-guide/application-wo ### Step 1: Open the dashboard -Log in to the web console of KubeSphere as `project-regular`. Go to **Configurations** of a project, choose **Secrets** and click **Create**. - -![open-dashboard](/images/docs/project-user-guide/configurations/image-registries/open-dashboard.png) +Log in to the web console of KubeSphere as `project-regular`. Go to **Configuration** of a project, select **Secrets** and click **Create**. ### Step 2: Enter basic information @@ -30,30 +28,24 @@ Specify a name for the Secret (for example, `demo-registry-secret`) and click ** {{< notice tip >}} -You can see the Secret's manifest file in YAML format by enabling **Edit Mode** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard. +You can see the Secret's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard. 
{{}} -![create-secret](/images/docs/project-user-guide/configurations/image-registries/create-secret.png) - ### Step 3: Specify image registry information -Select **kubernetes.io/dockerconfigjson (Image Registry Secret)** for **Type**. To use images from your private registry as you create application workloads, you need to specify the following fields. +Select **Image registry information** for **Type**. To use images from your private registry as you create application workloads, you need to specify the following fields. - **Registry Address**. The address of the image registry that stores images for you to use when creating application workloads. - **Username**. The account name you use to log in to the registry. - **Password**. The password you use to log in to the registry. - **Email** (optional). Your email address. -![image-registry-info](/images/docs/project-user-guide/configurations/image-registries/image-registry-info.png) - #### Add the Docker Hub registry 1. Before you add your image registry in [Docker Hub](https://hub.docker.com/), make sure you have an available Docker Hub account. On the **Secret Settings** page, enter `docker.io` for **Registry Address** and enter your Docker ID and password for **User Name** and **Password**. Click **Validate** to check whether the address is available. - ![validate-registry-address](/images/docs/project-user-guide/configurations/image-registries/validate-registry-address.png) - -2. Click **Create**. Later, the Secret will be displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). +2. Click **Create**. Later, the Secret is displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). 
#### Add the Harbor image registry @@ -75,7 +67,7 @@ Select **kubernetes.io/dockerconfigjson (Image Registry Secret)** for **Type**. - `Environment` represents [dockerd options](https://docs.docker.com/engine/reference/commandline/dockerd/). - - `--insecure-registry` is required by the Docker daemon for the communication with an insecure registry. Refer to [docker docs](https://docs.docker.com/engine/reference/commandline/dockerd/#insecure-registries) for its syntax. + - `--insecure-registry` is required by the Docker daemon for the communication with an insecure registry. Refer to [Docker documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#insecure-registries) for its syntax. {{}} @@ -89,9 +81,7 @@ Select **kubernetes.io/dockerconfigjson (Image Registry Secret)** for **Type**. sudo systemctl restart docker ``` -3. Go back to the **Secret Settings** page and select **kubernetes.io/dockerconfigjson (Image Registry Secret)** for **Type**. Enter your Harbor IP address for **Registry Address** and enter the username and password. - - ![harbor-address](/images/docs/project-user-guide/configurations/image-registries/harbor-address.png) +3. Go back to the **Data Settings** page and select **Image registry information** for **Type**. Enter your Harbor IP address for **Registry Address** and enter the username and password. {{< notice note >}} @@ -99,7 +89,7 @@ Select **kubernetes.io/dockerconfigjson (Image Registry Secret)** for **Type**. {{}} -4. Click **Create**. Later, the Secret will be displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). +4. Click **Create**. Later, the Secret is displayed on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../../project-user-guide/configuration/secrets/#check-secret-details). 
**HTTPS** @@ -107,6 +97,4 @@ For the integration of the HTTPS-based Harbor registry, refer to [Harbor Documen ## Use an Image Registry -When you set images, you can select the private image registry if the Secret of it is created in advance. For example, click the arrow on the **Container Image** page to expand the registry list when you create a [Deployment](../../../project-user-guide/application-workloads/deployments/). After you choose the image registry, enter the image name and tag to use the image. - -![use-image-registry](/images/docs/project-user-guide/configurations/image-registries/use-image-registry.png) \ No newline at end of file +When you set images, you can select the private image registry if the Secret of it is created in advance. For example, click the arrow on the **Add Container** page to expand the registry list when you create a [Deployment](../../../project-user-guide/application-workloads/deployments/). After you choose the image registry, enter the image name and tag to use the image. diff --git a/content/en/docs/project-user-guide/configuration/secrets.md b/content/en/docs/project-user-guide/configuration/secrets.md index 23f5f2968..c2043b065 100644 --- a/content/en/docs/project-user-guide/configuration/secrets.md +++ b/content/en/docs/project-user-guide/configuration/secrets.md @@ -1,7 +1,7 @@ --- -title: "Secrets" +title: "Kubernetes Secrets in KubeSphere" keywords: 'KubeSphere, Kubernetes, Secrets' -description: 'Learn how to create a Secret in KubeSphere.' +description: 'Learn how to create a Secret on KubeSphere.' linkTitle: "Secrets" weight: 10410 --- @@ -16,15 +16,13 @@ This tutorial demonstrates how to create a Secret in KubeSphere. ## Prerequisites -You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). 
+You need to create a workspace, a project, and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). -## Create a Secret +## Create a Kubernetes Secret ### Step 1: Open the dashboard -Log in to the console as `project-regular`. Go to **Configurations** of a project, choose **Secrets** and click **Create**. - -![create-secrets](/images/docs/project-user-guide/configurations/secrets/create-secrets.png) +Log in to the console as `project-regular`. Go to **Configuration** of a project, select **Secrets** and click **Create**. ### Step 2: Enter basic information @@ -32,17 +30,13 @@ Specify a name for the Secret (for example, `demo-secret`) and click **Next** to {{< notice tip >}} -You can see the Secret's manifest file in YAML format by enabling **Edit Mode** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard. +You can see the Secret's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a Secret. Alternatively, you can follow the steps below to create a Secret via the dashboard. {{}} -![set-secret](/images/docs/project-user-guide/configurations/secrets/set-secret.png) - ### Step 3: Set a Secret -1. Under the tab **Secret Settings**, you must choose a Secret type. In KubeSphere, you can create the following types of Secrets, indicated by the `type` field. - - ![secret-type](/images/docs/project-user-guide/configurations/secrets/secret-type.png) +1. Under the tab **Data Settings**, you must select a Secret type. In KubeSphere, you can create the following Kubernetes Secret types, indicated by the `type` field.
{{< notice note >}} @@ -50,42 +44,28 @@ You can see the Secret's manifest file in YAML format by enabling **Edit Mode** {{}} - - **Opaque (Default)**. The type of [Opaque](https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets) in Kubernetes, which is also the default Secret type in Kubernetes. You can create arbitrary user-defined data for this type of Secret. Click **Add Data** to add key-value pairs for it. + - **Default**. The type of [Opaque](https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets) in Kubernetes, which is also the default Secret type in Kubernetes. You can create arbitrary user-defined data for this type of Secret. Click **Add Data** to add key-value pairs for it. - ![default-secret](/images/docs/project-user-guide/configurations/secrets/default-secret.png) + - **TLS information**. The type of [kubernetes.io/tls](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) in Kubernetes, which is used to store a certificate and its associated key that are typically used for TLS, such as TLS termination of Ingress resources. You must specify **Credential** and **Private Key** for it, indicated by `tls.crt` and `tls.key` in the YAML file respectively. - - **kubernetes.io/tis (TLS)**. The type of [kubernetes.io/tls](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) in Kubernetes, which is used to store a certificate and its associated key that are typically used for TLS, such as TLS termination of Ingress resources. You must specify **Credential** and **Private Key** for it, indicated by `tls.crt` and `tls.key` in the YAML file respectively. + - **Image registry information**. The type of [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#docker-config-secrets) in Kubernetes, which is used to store the credentials for accessing a Docker registry for images. For more information, see [Image Registries](../image-registry/). 
- ![tls](/images/docs/project-user-guide/configurations/secrets/tls.png) + - **Username and password**. The type of [kubernetes.io/basic-auth](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret) in Kubernetes, which is used to store credentials needed for basic authentication. You must specify **Username** and **Password** for it, indicated by `username` and `password` in the YAML file respectively. - - **kubernetes.io/dockerconfigjson (Image Registry Secret)**. The type of [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#docker-config-secrets) in Kubernetes, which is used to store the credentials for accessing a Docker registry for images. For more information, see [Image Registries](../image-registry/). +2. For this tutorial, select the default type of Secret. Click **Add Data** and enter the **Key** (`MYSQL_ROOT_PASSWORD`) and **Value** (`123456`) to specify a Secret for MySQL. - ![image-registry-secret](/images/docs/project-user-guide/configurations/secrets/image-registry-secret.png) - - - **kubernetes.io/basic-auth (Account Password Secret)**. The type of [kubernetes.io/basic-auth](https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret) in Kubernetes, which is used to store credentials needed for basic authentication. You must specify **Username** and **Password** for it, indicated by `username` and `password` in the YAML file respectively. - - ![account-password-secret](/images/docs/project-user-guide/configurations/secrets/account-password-secret.png) - -2. For this tutorial, select the default type of Secret. Click **Add Data** and enter the **Key** (`MYSQL_ROOT_PASSWORD`) and **Value** (`123456`) as below to specify a Secret for MySQL. - - ![enter-key](/images/docs/project-user-guide/configurations/secrets/enter-key.png) - -3. Click **√** in the bottom-right corner to confirm. 
You can continue to add key-value pairs to the Secret or click **Create** to finish the creation. For more information about how to use the Secret, see [Compose and Deploy WordPress](../../../quick-start/wordpress-deployment/#task-3-create-an-application). +3. Click **√** in the lower-right corner to confirm. You can continue to add key-value pairs to the Secret or click **Create** to finish the creation. For more information about how to use the Secret, see [Compose and Deploy WordPress](../../../quick-start/wordpress-deployment/#task-3-create-an-application). ## Check Secret Details -1. After a Secret is created, it will be displayed in the list as below. You can click on the right and select the operation from the menu to modify it. +1. After a Secret is created, it is displayed in the list. You can click on the right and select the operation from the menu to modify it. - ![secret-list](/images/docs/project-user-guide/configurations/secrets/secret-list.png) - - - **Edit**: View and edit the basic information. + - **Edit Information**: View and edit the basic information. - **Edit YAML**: View, upload, download, or update the YAML file. - - **Edit Seret**: Modify the key-value pair of the Secret. + - **Edit Settings**: Modify the key-value pair of the Secret. - **Delete**: Delete the Secret. -2. Click the name of the Secret and you can go to its detail page. Under the tab **Detail**, you can see all the key-value pairs you have added for the Secret. - - ![secret-detail-page](/images/docs/project-user-guide/configurations/secrets/secret-detail-page.png) +2. Click the name of the Secret and you can go to its details page. Under the tab **Data**, you can see all the key-value pairs you have added for the Secret. {{< notice note >}} @@ -95,23 +75,17 @@ As mentioned above, KubeSphere automatically converts the value of a key into it 3. Click **More** to display what operations about this Secret you can do.
- ![click-more](/images/docs/project-user-guide/configurations/secrets/click-more.png) - - **Edit YAML**: View, upload, download, or update the YAML file. - **Edit Secret**: Modify the key-value pair of the Secret. - **Delete**: Delete the Secret, and return to the list page. -## Use a Secret +## How to Use a Kubernetes Secret Generally, you need to use a Secret when you create workloads, [Services](../../../project-user-guide/application-workloads/services/), [Jobs](../../../project-user-guide/application-workloads/jobs/) or [CronJobs](../../../project-user-guide/application-workloads/cronjobs/). For example, you can select a Secret for a code repository. For more information, see [Image Registries](../image-registry/). -![use-secret-repository](/images/docs/project-user-guide/configurations/secrets/use-secret-repository.png) - Alternatively, you may need to add environment variables for containers. On the **Container Image** page, select **Environment Variables** and click **Use ConfigMap or Secret** to use a Secret from the list. -![use-secret-image](/images/docs/project-user-guide/configurations/secrets/use-secret-image.png) - ## Create the Most Common Secrets This section shows how to create Secrets from your Docker Hub account and GitHub account. @@ -120,9 +94,9 @@ This section shows how to create Secrets from your Docker Hub account and GitHub 1. Log in to KubeSphere as `project-regular` and go to your project. Select **Secrets** from the navigation bar and click **Create** on the right. -2. Set a name, such as `dockerhub-id`, and click **Next**. On the **Secret Settings** page, fill in the following fields and click **Validate** to verify whether the information provided is valid. +2. Set a name, such as `dockerhub-id`, and click **Next**. On the **Data Settings** page, fill in the following fields and click **Validate** to verify whether the information provided is valid. - **Type**: Select **kubernetes.io/dockerconfigjson (Image Registry Secret)**. 
+ **Type**: Select **Image registry information**. **Registry Address**: Enter the Docker Hub registry address, such as `docker.io`. @@ -130,22 +104,18 @@ This section shows how to create Secrets from your Docker Hub account and GitHub **Password**: Enter your Docker Hub password. - ![docker-hub-secret](/images/docs/project-user-guide/configurations/secrets/docker-hub-secret.png) - 3. Click **Create** to finish. ### Create the GitHub Secret 1. Log in to KubeSphere as `project-regular` and go to your project. Select **Secrets** from the navigation bar and click **Create** on the right. -2. Set a name, such as `github-id`, and click **Next**. On the **Secret Settings** page, fill in the following fields. +2. Set a name, such as `github-id`, and click **Next**. On the **Data Settings** page, fill in the following fields. - **Type**: Select **kubernetes.io/basic-auth (Account Password Secret)**. + **Type**: Select **Username and password**. **Username**: Enter your GitHub account. **Password**: Enter your GitHub password. - ![github-secret](/images/docs/project-user-guide/configurations/secrets/github-secret.png) - -3. Click **Create** to finish. \ No newline at end of file +3. Click **Create** to finish. diff --git a/content/en/docs/project-user-guide/configuration/serviceaccounts.md b/content/en/docs/project-user-guide/configuration/serviceaccounts.md index 2e9384367..d05ffb2a9 100644 --- a/content/en/docs/project-user-guide/configuration/serviceaccounts.md +++ b/content/en/docs/project-user-guide/configuration/serviceaccounts.md @@ -1,9 +1,48 @@ --- title: "Service Accounts" -keywords: 'KubeSphere, Kubernetes, ServiceAccounts' -description: 'Learn how to create Service Accounts in KubeSphere.' +keywords: 'KubeSphere, Kubernetes, Service Accounts' +description: 'Learn how to create service accounts on KubeSphere.' 
linkTitle: "Service Accounts" weight: 10440 --- -TBD \ No newline at end of file +A [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) provides an identity for processes that run in a Pod. When accessing a cluster, a user is authenticated by the API server as a particular user account. Processes in containers inside Pods are authenticated as a particular service account when these processes contact the API server. + +This document describes how to create service accounts on KubeSphere. + +## Prerequisites + +You need to create a workspace, a project, and a user (`project-regular`), and invite the user to the project and assign it the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create Service Account + +### Step 1: Log in to KubeSphere + +1. Log in to the KubeSphere console as `project-regular`. Go to **Configuration** of a project and click **Service Accounts**. A service account named `default` is displayed on the **Service Accounts** page as it is automatically created when the project is created. + + {{< notice note >}} + + If no service account is specified when creating workloads in a project, the service account `default` in the same project is automatically assigned. + + {{}} + +2. Click **Create**. + +### Step 2: Set a service account + +1. In the displayed dialog box, set the following parameters: + - **Name**: A unique identifier for the service account. + - **Alias**: An alias for the service account to help you better identify the service account. + - **Description**: A brief introduction of the service account. + - **Project Role**: Select a project role from the drop-down list for the service account. Different project roles have [different permissions](../../../project-administration/role-and-member-management/#built-in-roles) in a project. +2. 
Click **Create** after you finish setting the parameters. The service account created is displayed on the **Service Accounts** page. + +## Service Account Details Page + +1. Click the service account created to go to its details page. +2. Click **Edit Information** to edit its basic information, or click **More** to select an operation from the drop-down menu. + - **Edit YAML**: View, update, or download the YAML file. + - **Change Role**: Change the project role of the service account. + - **Delete**: Delete the service account and return to the previous page. +3. On the **Resource Status** tab, details about the corresponding Secret and the kubeconfig of the service account are displayed. + diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md b/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md index 771ba3df4..5522caf7b 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md @@ -11,16 +11,16 @@ This tutorial demonstrates how to monitor and visualize MySQL metrics. ## Prerequisites -- You need to [enable the App Store](../../../../pluggable-components/app-store/). MySQL and MySQL Exporter will be deployed from the App Store. -- You need to create a workspace, a project, and an account (`project-regular`) for this tutorial. The account needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../../quick-start/create-workspace-and-project/). +- You need to [enable the App Store](../../../../pluggable-components/app-store/). MySQL and MySQL Exporter are available in the App Store. +- You need to create a workspace, a project, and a user (`project-regular`) for this tutorial. The user needs to be invited to the project with the `operator` role. 
For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). ## Step 1: Deploy MySQL To begin with, you need to [deploy MySQL from the App Store](../../../../application-store/built-in-apps/mysql-app/). -1. Go to your project and click **App Store** in the top-left corner. +1. Go to your project and click **App Store** in the upper-left corner. -2. Click **MySQL** to go to its product detail page and click **Deploy** on the **App Information** tab. +2. Click **MySQL** to go to its details page and click **Install** on the **App Information** tab. {{< notice note >}} @@ -28,25 +28,21 @@ MySQL is a built-in app in the KubeSphere App Store, which means it can be deplo {{}} -3. Under **Basic Information**, set an **App Name** and select an **App Version**. Select the project where the app will be deployed under **Deployment Location** and click **Next**. +3. Under **Basic Information**, set a **Name** and select a **Version**. Select the project where the app is deployed under **Location** and click **Next**. -4. Under **App Configurations**, set a root password by uncommenting the `mysqlRootPassword` field and click **Deploy**. - - ![mysql-root-password](/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png) +4. Under **App Settings**, set a root password by uncommenting the `mysqlRootPassword` field and click **Install**. 5. Wait until MySQL is up and running. - ![mysql-ready](/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png) - ## Step 2: Deploy MySQL Exporter You need to deploy MySQL Exporter in the same project on the same cluster. MySQL Exporter is responsible for querying the status of MySQL and reports the data in Prometheus format. 1. Go to **App Store** and click **MySQL Exporter**. -2. On the product detail page, click **Deploy**. +2. On the details page, click **Install**. -3. 
Under **Basic Information**, set an **App Name** and select an **App Version**. Select the same project where MySQL is deployed under **Deployment Location** and click **Next**. +3. Under **Basic Information**, set a **Name** and select a **Version**. Select the same project where MySQL is deployed under **Location** and click **Next**. 4. Make sure `serviceMonitor.enabled` is set to `true`. The built-in MySQL Exporter sets it to `true` by default, so you don't need to manually change the value of `serviceMonitor.enabled`. @@ -54,27 +50,19 @@ You need to deploy MySQL Exporter in the same project on the same cluster. MySQL You must enable the ServiceMonitor CRD if you are using external exporter Helm charts. Those charts usually disable ServiceMonitors by default and require manual modification. {{}} -5. Modify MySQL connection parameters. MySQL Exporter needs to connect to the target MySQL. In this tutorial, MySQL is installed with the service name `mysql-dh3ily`. Navigate to `mysql` in the configuration file, and set `host` to `mysql-dh3ily`, `pass` to `testing`, and `user` to `root` as below. Note that your MySQL service may be created with **a different name**. - - ![mysql-app-configurations](/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-app-configurations.png) - - Click **Deploy**. +5. Modify MySQL connection parameters. MySQL Exporter needs to connect to the target MySQL. In this tutorial, MySQL is installed with the service name `mysql-dh3ily`. Navigate to `mysql` in the configuration file, and set `host` to `mysql-dh3ily`, `pass` to `testing`, and `user` to `root`. Note that your MySQL service may be created with **a different name**. After you finish editing the file, click **Install**. 6. Wait until MySQL Exporter is up and running. 
- ![mysql-exporter-ready](/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png) - ## Step 3: Create a Monitoring Dashboard You can create a monitoring dashboard for MySQL and visualize real-time metrics. 1. In the same project, go to **Custom Monitoring** under **Monitoring & Alerting** in the sidebar and click **Create**. -2. In the dialog that appears, set a name for the dashboard (for example, `mysql-overview`) and select the MySQL template. Click **Next** to continue. +2. In the displayed dialog box, set a name for the dashboard (for example, `mysql-overview`) and select the MySQL template. Click **Next** to continue. -3. Save the template by clicking **Save Template** in the top-right corner. A newly-created dashboard will appear on the **Custom Monitoring Dashboards** page. - - ![mysql-dashboards](/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-dashboards.png) +3. Save the template by clicking **Save Template** in the upper-right corner. A newly-created dashboard is displayed on the **Custom Monitoring Dashboards** page. {{< notice note >}} diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md b/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md index aac17352b..42b44242a 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md @@ -11,7 +11,7 @@ This section walks you through monitoring a sample web application. The applicat ## Prerequisites - Please make sure you [enable the OpenPitrix system](../../../../pluggable-components/app-store/). -- You need to create a workspace, a project, and a user account for this tutorial. 
For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited to the workspace with the `self-provisioner` role. Namely, create an account `workspace-self-provisioner` of the `self-provisioner` role, and use this account to create a project (for example, `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`. +- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspaces, Projects, Users and Roles](../../../../quick-start/create-workspace-and-project/). The user needs to be a platform regular user and to be invited to the workspace with the `self-provisioner` role. Namely, create a user `workspace-self-provisioner` of the `self-provisioner` role, and use this user to create a project (for example, `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`. - Knowledge of Helm charts and [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/). @@ -33,23 +33,9 @@ Find the source code in the folder `helm` in [kubesphere/prometheus-example-app] ### Step 3: Upload the Helm chart -1. Go to the workspace **Overview** page of `demo-workspace` and navigate to **App Templates**. +1. Go to the workspace **Overview** page of `demo-workspace` and navigate to **App Templates** under **App Management**. - ![app-template-create](/images/docs/project-user-guide/custom-application-monitoring/app-template-create.jpg) - -2. Click **Create** and upload `prometheus-example-app-0.1.0.tgz` as images below.
- - ![click-create-app-template](/images/docs/project-user-guide/custom-application-monitoring/click-create-app-template.jpg) - - ![click-upload-app-template](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template.jpg) - - ![click-upload-app-template-2](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-2.jpg) - - ![click-upload-app-template-4](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-4.jpg) - - ![click-upload-app-template-5](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-5.jpg) - - ![click-upload-app-template-6](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-6.jpg) +2. Click **Create** and upload `prometheus-example-app-0.1.0.tgz`. ### Step 4: Deploy the sample web application @@ -57,62 +43,30 @@ You need to deploy the sample web application into `test`. For demonstration pur 1. Click `prometheus-example-app`. - ![deploy-sample-web-1](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-1.jpg) - -2. Expand the menu and click **Test Deployment**. - - ![deploy-sample-web-2](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-2.jpg) - - ![deploy-sample-web-3](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-3.jpg) +2. Expand the menu and click **Install**. 3. Make sure you deploy the sample web application in `test` and click **Next**. - ![deploy-sample-web-4](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-4.jpg) - -4. Make sure `serviceMonitor.enabled` is set to `true` and click **Deploy**. - - ![deploy-sample-web-5](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-5.jpg) - - ![deploy-sample-web-6](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-6.jpg) +4. 
Make sure `serviceMonitor.enabled` is set to `true` and click **Install**. 5. In **Workloads** of the project `test`, wait until the sample web application is up and running. - ![create-dashboard-1](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-1.jpg) - ### Step 5: Create a monitoring dashboard This section guides you on how to create a dashboard from scratch. You will create a text chart showing the total number of processed operations and a line chart for displaying the operation rate. -1. Navigate to **Custom Monitoring** and click **Create**. +1. Navigate to **Custom Monitoring Dashboards** and click **Create**. - ![create-dashboard-2](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-2.jpg) +2. Set a name (for example, `sample-web`) and click **Next**. -2. Set a name (for example, `sample-web`) and click **Create**. +3. Enter a title in the upper-left corner (for example, `Sample Web Overview`). - ![create-dashboard-3](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-3.jpg) +4. Click on the left column to create a text chart. -3. Enter a title in the top-left corner (for example, `Sample Web Overview`). +5. Type the PromQL expression `myapp_processed_ops_total` in the field **Monitoring Metric** and give a chart name (for example, `Operation Count`). Click **√** in the lower-right corner to continue. - ![create-dashboard-4](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-4.jpg) +6. Click **Add Monitoring Item**, select **Line Chart**, and click **OK**. -4. Click the **plus icon** on the left column to create a text chart. - - ![create-dashboard-5](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-5.jpg) - -5. Type the PromQL expression `myapp_processed_ops_total` in the field **Monitoring Metrics** and give a chart name (for example, `Operation Count`). Click **√** in the bottom-right corner to continue. 
- - ![create-dashboard-6](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-6.jpg) - -6. Click **Add Monitoring Item** to create a line chart. - - ![create-dashboard-7](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-7.jpg) - - ![create-dashboard-8](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-8.jpg) - -7. Type the PromQL expression `irate(myapp_processed_ops_total[3m])` for **Monitoring Metrics** and name the chart `Operation Rate`. To improve the appearance, you can set **Metric Name** to `{{service}}`. It will name each line with the value of the metric label `service`. Next, set **Decimal Places** to `2` so that the result will be truncated to two decimal places. - - ![create-dashboard-9](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-9.jpg) +7. Enter the PromQL expression `irate(myapp_processed_ops_total[3m])` for **Monitoring Metric** and name the chart `Operation Rate`. To improve the appearance, you can set **Metric Name** to `{{service}}`. It will name each line with the value of the metric label `service`. Next, set **Decimal Places** to `2` so that the result will be truncated to two decimal places. Click **√** in the lower-right corner to continue. 8. Click **Save Template** to save it. - - ![create-dashboard-10](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-10.jpg) \ No newline at end of file diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/introduction.md b/content/en/docs/project-user-guide/custom-application-monitoring/introduction.md index 1d305c8b0..3f9dfbd29 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/introduction.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/introduction.md @@ -20,7 +20,7 @@ First of all, your application must expose Prometheus-formatted metrics. 
The Pro #### Direct exposing -Directly exposing Prometheus metrics from applications is a common way among cloud-native applications. It requires developers to import Prometheus client libraries in their codes and expose metrics at a specific endpoint. Many applications, such as ETCD, CoreDNS, and Istio, adopt this method. +Directly exposing Prometheus metrics from applications is a common way among cloud-native applications. It requires developers to import Prometheus client libraries in their codes and expose metrics at a specific endpoint. Many applications, such as etcd, CoreDNS, and Istio, adopt this method. The Prometheus community offers client libraries for most programming languages. Find your language on the [Prometheus Client Libraries](https://prometheus.io/docs/instrumenting/clientlibs/) page. For Go developers, read [Instrumenting a Go application](https://prometheus.io/docs/guides/go-application/) to learn how to write a Prometheus-compliant application. diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/overview.md b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/overview.md index 775859df7..61f57b748 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/overview.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/overview.md @@ -16,8 +16,6 @@ There are three available built-in templates for MySQL, Elasticsearch, and Redis A KubeSphere custom monitoring dashboard can be seen as simply a YAML configuration file. The data model is heavily inspired by [Grafana](https://github.com/grafana/grafana), an open-source tool for monitoring and observability. Please find KubeSphere monitoring dashboard data model design in [kubesphere/monitoring-dashboard](https://github.com/kubesphere/monitoring-dashboard). The configuration file is portable and sharable. 
You are welcome to contribute dashboard templates to the KubeSphere community via [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery). -![new-dashboard](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png) - ### From a built-in template To help you quickly get started, KubeSphere provides built-in templates for MySQL, Elasticsearch, and Redis. If you want to create dashboards from built-in templates, select a template and then click **Next**. @@ -28,9 +26,7 @@ To start with a blank template, click **Next**. ### From a YAML file -Turn on **Edit Mode** in the upper-right corner and then paste your dashboard YAML file. - -![new-dashboard-2](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-mode.png) +Turn on **Edit YAML** in the upper-right corner and then paste your dashboard YAML file. ## Dashboard Layout @@ -40,25 +36,17 @@ The monitoring dashboard is composed of four parts. Global settings are on the t On the top bar, you can configure the following settings: title, theme, time range, and refresh interval. -![dashboard-layout](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-settings.png) - ### Text chart column You can add new text charts in the left-most column. -![dashboard-layout-2](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/text-charts.png) - ### Chart display column You can view charts in the middle column. -![dashboard-layout-3](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/view-charts.png) - ### Detail column -You can view chart details in the right-most column. It shows the **max**, **min**, **avg** and **last** value of metrics within the specific period. 
- -![dashboard-layout-4](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/detail-column.png) +You can view chart details in the right-most column. It shows the **max**, **min**, **avg**, and **last** value of metrics within the specific period. ## Edit the monitoring dashboard @@ -68,8 +56,6 @@ You can modify an existing template by clicking **Edit Template** in the upper-r To add text charts, click in the left column. To add charts in the middle column, click **Add Monitoring Item** in the lower-right corner. -![edit-dashboard](/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/add-charts.png) - ### Add a monitoring group To group monitoring items, you can click to drag and drop an item into the target group. To add a new group, click **Add Monitoring Group**. If you want to change the place of a group, hover over a group and click or arrow on the right. diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/panel.md b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/panel.md index 282221549..1dd9703d8 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/panel.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/panel.md @@ -15,23 +15,20 @@ A text chart is preferable for displaying a single metric value. The editing win - **Chart Name**: The name of the text chart. - **Unit**: The metric data unit. - **Decimal Places**: Accept an integer. -- **Monitoring Metrics**: A list of available Prometheus metrics. +- **Monitoring Metric**: Specify a monitoring metric from the drop-down list of available Prometheus metrics. -![text-chart](/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png) +## Graph Chart -## Graph +A graph chart is preferable for displaying multiple metric values. 
The editing window for the graph is composed of three parts. The upper part displays real-time metric values. The left part is for setting the graph theme. The right part is for editing metrics and chart descriptions. -A graph is preferable for displaying multiple metric values. The editing window for the graph is composed of three parts. The upper part displays real-time metric values. The left part is for setting the graph theme. The right part is for editing metrics and chart descriptions. - -- **Graph Types**: Support line charts and stacked charts. +- **Chart Types**: Support basic charts and bar charts. +- **Graph Types**: Support basic charts and stacked charts. - **Chart Colors**: Change line colors. - **Chart Name**: The name of the chart. - **Description**: The chart description. - **Add**: Add a new query editor. - **Metric Name**: Legend for the line. It supports variables. For example, `{{pod}}` means using the value of the Prometheus metric label `pod` to name this line. - **Interval**: The step value between two data points. -- **Monitoring Metrics**: A list of available Prometheus metrics. +- **Monitoring Metric**: A list of available Prometheus metrics. - **Unit**: The metric data unit. - **Decimal Places**: Accept an integer. - -![graph-chart](/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png) \ No newline at end of file diff --git a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/querying.md b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/querying.md index ac11c4048..11e5a2ccc 100644 --- a/content/en/docs/project-user-guide/custom-application-monitoring/visualization/querying.md +++ b/content/en/docs/project-user-guide/custom-application-monitoring/visualization/querying.md @@ -6,7 +6,7 @@ linkTitle: "Querying" weight: 10817 --- -In the query editor, you can enter PromQL expressions to process and fetch metrics. 
To learn how to write PromQL, read [Query Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/). +In the query editor, enter PromQL expressions in **Monitoring Metrics** to process and fetch metrics. To learn how to write PromQL, read [Query Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/). ![query-editor-1](/images/docs/project-user-guide/custom-application-monitoring/visualization/querying/query-editor-1.png) diff --git a/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md b/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md index c053d42af..1eb06f097 100644 --- a/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md +++ b/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md @@ -1,7 +1,7 @@ --- -title: "Kubernetes Blue-green Deployment in Kubesphere" -keywords: 'KubeSphere, Kubernetes, service mesh, istio, release, blue-green deployment' -description: 'Learn how to release a blue-green deployment in KubeSphere.' +title: "Kubernetes Blue-Green Deployment on KubeSphere" +keywords: 'KubeSphere, Kubernetes, Service Mesh, Istio, Grayscale Release, Blue-Green deployment' +description: 'Learn how to release a blue-green deployment on KubeSphere.' linkTitle: "Blue-Green Deployment with Kubernetes" weight: 10520 --- @@ -15,41 +15,27 @@ The blue-green release provides a zero downtime deployment, which means the new ## Prerequisites - You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/). -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`).
The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to enable **Application Governance** and have an available app so that you can implement the blue-green deployment for it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/). ## Create a Blue-green Deployment Job -1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**. Under **Categories**, click **Create Job** on the right of **Blue-green Deployment**. +1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**. Under **Release Modes**, click **Create** on the right of **Blue-Green Deployment**. 2. Set a name for it and click **Next**. -3. On the **Grayscale Release Components** tab, select your app from the drop-down list and the Service for which you want to implement the blue-green deployment. If you also use the sample app Bookinfo, select **reviews** and click **Next**. +3. On the **Service Settings** tab, select your app from the drop-down list and the Service for which you want to implement the blue-green deployment. If you also use the sample app Bookinfo, select **reviews** and click **Next**. -4. On the **Grayscale Release Version** tab, add another version (e.g `v2`) as shown in the following figure and click **Next**: +4. On the **New Version Settings** tab, add another version (e.g. `kubesphere/examples-bookinfo-reviews-v2:1.16.2`) and click **Next**. - ![blue-green-4](/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.png) +5. On the **Strategy Settings** tab, to allow the app version `v2` to take over all the traffic, select **Take Over** and click **Create**. - {{< notice note >}} +6.
The blue-green deployment job created is displayed under the **Release Jobs** tab. Click it to view details. - The image version is `v2` in the screenshot. - - {{}} - -5. On the **Policy Config** tab, to allow the app version `v2` to take over all the traffic, select **Take over all traffic** and click **Create**. - -6. The blue-green deployment job created is displayed under the tab **Job Status**. Click it to view details. - - ![blue-green-job-list](/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.png) - -7. Wait for a while and you can see all the traffic go to the version `v2`: - - ![blue-green-6](/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.png) +7. Wait for a while and you can see all the traffic go to the version `v2`. 8. The new **Deployment** is created as well. - ![version2-deployment](/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.png) - 9. You can get the virtual service to identify the weight by running the following command: ```bash @@ -59,7 +45,7 @@ The blue-green release provides a zero downtime deployment, which means the new {{< notice note >}} - When you run the command above, replace `demo-project` with your own project (namely, namespace) name. - - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the account `admin`. + - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the user `admin`. {{}} @@ -83,7 +69,6 @@ The blue-green release provides a zero downtime deployment, which means the new ## Take a Job Offline -After you implement the blue-green deployment, and the result meets your expectation, you can take the task offline with the version `v1` removed by clicking **Job offline**. 
+After you implement the blue-green deployment, and the result meets your expectation, you can take the task offline with the version `v1` removed by clicking **Delete**. -![blue-green-7](/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.png) diff --git a/content/en/docs/project-user-guide/grayscale-release/canary-release.md b/content/en/docs/project-user-guide/grayscale-release/canary-release.md index 53d2669b6..d87aa11d9 100644 --- a/content/en/docs/project-user-guide/grayscale-release/canary-release.md +++ b/content/en/docs/project-user-guide/grayscale-release/canary-release.md @@ -1,7 +1,7 @@ --- title: "Canary Release" -keywords: 'KubeSphere, Kubernetes, canary release, istio, service mesh' -description: 'Learn how to deploy a canary service in KubeSphere.' +keywords: 'KubeSphere, Kubernetes, Canary Release, Istio, Service Mesh' +description: 'Learn how to deploy a canary service on KubeSphere.' linkTitle: "Canary Release" weight: 10530 --- @@ -16,30 +16,20 @@ This method serves as an efficient way to test performance and reliability of a - You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/). - You need to enable [KubeSphere Logging](../../../pluggable-components/logging/) so that you can use the Tracing feature. -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to enable **Application Governance** and have an available app so that you can implement the canary release for it. 
The sample app used in this tutorial is Bookinfo. For more information, see [Deploy and Access Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/). ## Step 1: Create a Canary Release Job -1. Log in to KubeSphere as `project-regular` and navigate to **Grayscale Release**. Under **Categories**, click **Create Job** on the right of **Canary Release**. +1. Log in to KubeSphere as `project-regular` and navigate to **Grayscale Release**. Under **Release Modes**, click **Create** on the right of **Canary Release**. 2. Set a name for it and click **Next**. -3. On the **Grayscale Release Components** tab, select your app from the drop-down list and the Service for which you want to implement the canary release. If you also use the sample app Bookinfo, select **reviews** and click **Next**. +3. On the **Service Settings** tab, select your app from the drop-down list and the Service for which you want to implement the canary release. If you also use the sample app Bookinfo, select **reviews** and click **Next**. -4. On the **Grayscale Release Version** tab, add another version of it (e.g `kubesphere/examples-bookinfo-reviews-v2:1.13.0`; change `v1` to `v2`) as shown in the image below and click **Next**: +4. On the **New Version Settings** tab, add another version of it (e.g `kubesphere/examples-bookinfo-reviews-v2:1.16.2`; change `v1` to `v2`) and click **Next**. - ![canary-release-4](/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.png) - - {{< notice note >}} - - The image version is `v2` in the screenshot. - - {{}} - -5. You send traffic to these two versions (`v1` and `v2`) either by a specific percentage or by the request content such as `Http Header`, `Cookie` and `URI`. Select **Forward by traffic ratio** and drag the icon in the middle to change the percentage of traffic sent to these two versions respectively (for example, set 50% for either one). When you finish, click **Create**. 
- - ![canary-release-5](/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.png) +5. You send traffic to these two versions (`v1` and `v2`) either by a specific percentage or by the request content such as `Http Header`, `Cookie` and `URI`. Select **Specify Traffic Distribution** and move the slider to the middle to change the percentage of traffic sent to these two versions respectively (for example, set 50% for either one). When you finish, click **Create**. ## Step 2: Verify the Canary Release @@ -47,20 +37,12 @@ Now that you have two available app versions, access the app to verify the canar 1. Visit the Bookinfo website and refresh your browser repeatedly. You can see that the **Book Reviews** section switching between v1 and v2 at a rate of 50%. - ![canary](/images/docs/quickstart/deploy-bookinfo-to-k8s/canary.gif) +2. The created canary release job is displayed under the tab **Release Jobs**. Click it to view details. -2. The created canary release job is displayed under the tab **Job Status**. Click it to view details. - - ![canary-release-job](/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.png) - -3. You can see half of the traffic goes to each of them: - - ![canary-release-6](/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.png) +3. You can see half of the traffic goes to each of them. 4. The new Deployment is created as well. - ![deployment-list-1](/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.png) - 5. You can directly get the virtual Service to identify the weight by executing the following command: ```bash @@ -70,7 +52,7 @@ Now that you have two available app versions, access the app to verify the canar {{< notice note >}} - When you execute the command above, replace `demo-project` with your own project (namely, namespace) name. 
- - If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the account `admin`. + - If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the user `admin`. {{}} @@ -110,40 +92,29 @@ Now that you have two available app versions, access the app to verify the canar Make sure you replace the hostname and port number in the above command with your own. {{}} -2. In **Traffic Management**, you can see communications, dependency, health and performance among different microservices. +2. In **Traffic Monitoring**, you can see communications, dependency, health and performance among different microservices. - ![traffic-management](/images/docs/project-user-guide/grayscale-release/canary-release/traffic-management.png) - -3. Click a component (for example, **reviews**) and you can see the information of traffic monitoring on the right, displaying real-time data of **Traffic**, **Success rate** and **Duration**. - - ![topology](/images/docs/project-user-guide/grayscale-release/canary-release/topology.png) +3. Click a component (for example, **reviews**) and you can see the information of traffic monitoring on the right, displaying real-time data of **Traffic**, **Success rate**, and **Duration**. ## Step 4: View Tracing Details KubeSphere provides the distributed tracing feature based on [Jaeger](https://www.jaegertracing.io/), which is used to monitor and troubleshoot microservices-based distributed applications. -1. On the **Tracing** tab, you can clearly see all phases and internal calls of requests, as well as the period in each phase. - - ![tracing](/images/docs/project-user-guide/grayscale-release/canary-release/tracing.png) +1. On the **Tracing** tab, you can see all phases and internal calls of requests, as well as the period in each phase. 2. Click any item, and you can even drill down to see request details and where this request is being processed (which machine or container). 
- ![tracing-kubesphere](/images/docs/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png) - ## Step 5: Take Over All Traffic If everything runs smoothly, you can bring all the traffic to the new version. -1. In **Grayscale Release**, click the canary release job. +1. In **Release Jobs**, click the canary release job. 2. In the displayed dialog box, click on the right of **reviews v2** and select **Take Over**. It means 100% of the traffic will be sent to the new version (v2). - ![take-over-release](/images/docs/project-user-guide/grayscale-release/canary-release/take-over-release.png) - {{< notice note >}} If anything goes wrong with the new version, you can roll back to the previous version v1 anytime. {{}} 3. Access Bookinfo again and refresh the browser several times. You can find that it only shows the result of **reviews v2** (i.e. ratings with black stars). - ![finish-canary-release](/images/docs/project-user-guide/grayscale-release/canary-release/finish-canary-release.png) diff --git a/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md b/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md index 61f341e94..7d7568fd4 100644 --- a/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md +++ b/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md @@ -1,7 +1,7 @@ --- title: "Traffic Mirroring" -keywords: 'KubeSphere, Kubernetes, traffic mirroring, istio' -description: 'Learn how to conduct a traffic mirroring job in KubeSphere.' +keywords: 'KubeSphere, Kubernetes, Traffic Mirroring, Istio' +description: 'Learn how to conduct a traffic mirroring job on KubeSphere.' linkTitle: "Traffic Mirroring" weight: 10540 --- @@ -11,41 +11,27 @@ Traffic mirroring, also called shadowing, is a powerful, risk-free method of tes ## Prerequisites - You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/). 
-- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - You need to enable **Application Governance** and have an available app so that you can mirror the traffic of it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/). ## Create a Traffic Mirroring Job -1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**. Under **Categories**, click **Create Job** on the right of **Traffic Mirroring**. +1. Log in to KubeSphere as `project-regular` and go to **Grayscale Release**. Under **Release Modes**, click **Create** on the right of **Traffic Mirroring**. 2. Set a name for it and click **Next**. -3. On the **Grayscale Release Components** tab, select your app from the drop-down list and the Service of which you want to mirror the traffic. If you also use the sample app Bookinfo, select **reviews** and click **Next**. +3. On the **Service Settings** tab, select your app from the drop-down list and the Service of which you want to mirror the traffic. If you also use the sample app Bookinfo, select **reviews** and click **Next**. -4. On the **Grayscale Release Version** tab, add another version of it (for example, `v2`) as shown in the image below and click **Next**: +4. On the **New Version Settings** tab, add another version of it (for example, `kubesphere/examples-bookinfo-reviews-v2:1.16.2`; change `v1` to `v2`) and click **Next**. 
- ![traffic-mirroring-4](/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.png) +5. On the **Strategy Settings** tab, click **Create**. - {{< notice note >}} - - The image version is `v2` in the screenshot. - - {{}} - -5. On the **Policy Config** tab, click **Create**. - -6. The traffic mirroring job created is displayed under the tab **Job Status**. Click it to view details. - - ![traffic-mirroing-task](/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.png) +6. The traffic mirroring job created is displayed under the **Release Jobs** tab. Click it to view details. 7. You can see the traffic is being mirrored to `v2` with real-time traffic displayed in the line chart. - ![traffic-mirroring-6](/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.png) - 8. The new **Deployment** is created as well. - ![new-deployment](/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.png) - 9. You can get the virtual service to view `mirror` and `weight` by running the following command: ```bash @@ -55,7 +41,7 @@ Traffic mirroring, also called shadowing, is a powerful, risk-free method of tes {{< notice note >}} - When you run the command above, replace `demo-project` with your own project (namely, namespace) name. - - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the account `admin`. + - If you want to run the command from the web kubectl on the KubeSphere console, you need to use the user `admin`. {{}} @@ -92,6 +78,4 @@ These requests are mirrored as “fire and forget”, which means that the respo ## Take a Job Offline -You can remove the traffic mirroring job by clicking **Job offline**, which does not affect the current app version. 
- -![remove-traffic-mirroring](/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.png) \ No newline at end of file +You can remove the traffic mirroring job by clicking **Delete**, which does not affect the current app version. diff --git a/content/en/docs/project-user-guide/image-builder/binary-to-image.md b/content/en/docs/project-user-guide/image-builder/binary-to-image.md index 31c2c39ab..5e5fd6a57 100644 --- a/content/en/docs/project-user-guide/image-builder/binary-to-image.md +++ b/content/en/docs/project-user-guide/image-builder/binary-to-image.md @@ -20,13 +20,13 @@ For demonstration and testing purposes, here are some example artifacts you can | [b2i-war-java11.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java11.war) | [springmvc5](https://github.com/kubesphere/s2i-java-container/tree/master/tomcat/examples/springmvc5) | | [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) | [devops-go-sample](https://github.com/runzexia/devops-go-sample) | | [b2i-jar-java11.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java11.jar) | [ java-maven-example](https://github.com/kubesphere/s2i-java-container/tree/master/java/examples/maven) | -| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-java-sample](https://github.com/kubesphere/devops-java-sample) | +| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) | ## Prerequisites - You have enabled the [KubeSphere DevOps System](../../../pluggable-components/devops/). - You need to create a [Docker Hub](http://www.dockerhub.com/) account. GitLab and Harbor are also supported. 
-- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - Set a CI dedicated node for building images. This is not mandatory but recommended for the development and production environment as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/set-ci-node/). ## Create a Service Using Binary-to-Image (B2I) @@ -43,83 +43,52 @@ You must create a Docker Hub Secret so that the Docker image created through B2I 1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**. - ![create-service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/create-service.png) - -2. Scroll down to **Build a New Service through the Artifact** and select **war**. This tutorial uses the [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) project as a sample and uploads a war artifact to KubeSphere. Set a name, such as `b2i-war-java8`, and click **Next**. +2. Scroll down to **Create Service from Artifact** and select **WAR**. This tutorial uses the [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) project as a sample and uploads a war artifact to KubeSphere. Set a name, such as `b2i-war-java8`, and click **Next**. 3. On the **Build Settings** page, provide the following information accordingly and click **Next**. 
- ![build-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-settings.png) - **Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type). - **Upload Artifact**: Upload the war artifact ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war)). + **Artifact File**: Upload the war artifact ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war)). **Build Environment**: Select **kubesphere/tomcat85-java8-centos7:v2.1.0**. - **imageName**: Enter `/` or `/` as the image name. + **Image Name**: Enter `/` or `/` as the image name. - **tag**: The image tag. Enter `latest`. + **Image Tag**: The image tag. Enter `latest`. - **Target image repository**: Select the Docker Hub Secret as the image is pushed to Docker Hub. + **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub. -4. On the **Container Settings** page, scroll down to **Service Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-port`), and enter `8080` for both **Container Port** and **Service Port**. Click **Next** to continue. - - ![container-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/container-settings.png) +4. On the **Pod Settings** page, scroll down to **Port Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-port`), and enter `8080` for both **Container Port** and **Service Port**. Click **Next** to continue. 
{{< notice note >}} - For more information about how to set other parameters on the **Container Settings** page, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/). + For more information about how to set other parameters on the **Container Settings** page, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/). {{}} -5. On the **Mount Volumes** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue. +5. On the **Volume Settings** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue. -6. On the **Advanced Settings** page, select **Internet Access** and choose **NodePort** as the access method. Click **Create** to finish the whole process. - - ![advanced-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/advanced-settings.png) - -7. Click **Image Builder** from the navigation bar and you can see that the example image is being built.![building](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building.png) +6. On the **Advanced Settings** page, select **External Access** and choose **NodePort** as the access method. Click **Create** to finish the whole process. +7. Click **Image Builders** from the navigation bar and you can see that the example image is being built. ### Step 3: Check results -1. Wait for a while and you can see the status of the image has reached **Successful**. +1. Wait for a while and you can see the status of the image builder has reached **Successful**. - ![successful](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/successful.png) +2. Click this image to go to its details page. 
Under **Job Records**, click on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally. -2. Click this image to go to its detail page. Under **Job Records**, click on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally. - ![inspect-logs](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-logs.png) -3. Go back to the previous page, and you can see the corresponding Job, Deployment and Service of the image have all been created successfully. - #### Service - ![service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service.png) - #### Deployment - ![deployment](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/deployment.png) - #### Job - ![job](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job.png) +3. Go back to the **Services**, **Deployments**, and **Jobs** page, and you can see the corresponding Service, Deployment, and Job of the image have all been created successfully. 4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag. - ![docker-image](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image.png) - ### Step 4: Access the B2I Service -1. On the **Services** page, click the B2I Service to go to its detail page, where you can see the port number has been exposed. - ![exposed-port](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/exposed-port.png) +1. On the **Services** page, click the B2I Service to go to its details page, where you can see the port number has been exposed. 2. Access the Service at `http://://`.
- ![access-service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/access-service.jpg) - {{< notice note >}} You may need to open the port in your security groups and configure port forwarding rules depending on your deployment environment. @@ -142,47 +111,31 @@ Make sure you have created a Secret for Docker Hub. For more information, see [C 1. Log in to KubeSphere as `project-regular` and go to your project. -2. Select **Image Builder** from the navigation bar and click **Create**. +2. Select **Image Builders** from the navigation bar and click **Create**. - ![image-builder](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-builder.png) - -3. In the displayed dialog box, select **binary** and click **Next**. - - ![upload-artifact](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/upload-artifact.png) +3. In the displayed dialog box, select **Binary** and click **Next**. 4. On the **Build Settings** page, provide the following information accordingly and click **Create**. - ![buidling-settings-2](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/buidling-settings-2.png) - - **Upload Artifact**: Download [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) and upload it to KubeSphere. + **Artifact File**: Download [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) and upload it to KubeSphere. **Build Environment**: Select **kubesphere/s2i-binary:v2.1.0**. - **imageName**: Customize an image name. + **Image Name**: Customize an image name. - **tag**: The image tag. Enter `latest`. + **Image Tag**: The image tag. Enter `latest`. - **Target image repository**: Select the Docker Hub Secret as the image is pushed to Docker Hub. + **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub. 5. 
On the **Image Builders** page, you can see that the image is being built.
- ![docker-image-pushed](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image-pushed.png) - diff --git a/content/en/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md b/content/en/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md index d161a3b94..3b89fba20 100644 --- a/content/en/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md +++ b/content/en/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md @@ -6,33 +6,27 @@ linkTitle: "Configure S2I and B2I Webhooks" weight: 10650 --- -KubeSphere provides Source-to-Image (S2I) and Binary-to-Image (B2I) features to automate image building and pushing and application deployment. In KubeSphere v3.1, you can configure S2I and B2I webhooks so that your Image Builder can be automatically triggered when there is any relevant activity in your code repository. +KubeSphere provides Source-to-Image (S2I) and Binary-to-Image (B2I) features to automate image building and pushing and application deployment. In KubeSphere v3.1.x and later versions, you can configure S2I and B2I webhooks so that your Image Builder can be automatically triggered when there is any relevant activity in your code repository. This tutorial demonstrates how to configure S2I and B2I webhooks. ## Prerequisites - You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/). -- You need to create a workspace, a project (`demo-project`) and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project (`demo-project`) and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). 
- You need to create an S2I Image Builder and a B2I Image Builder. For more information, refer to [Source to Image: Publish an App without a Dockerfile](../source-to-image/) and [Binary to Image: Publish an Artifact to Kubernetes](../binary-to-image/). ## Configure an S2I Webhook ### Step 1: Expose the S2I trigger Service -1. Log in to the KubeSphere web console as `admin`. Click **Platform** in the top-left corner and then select **Cluster Management**. +1. Log in to the KubeSphere web console as `admin`. Click **Platform** in the upper-left corner and then select **Cluster Management**. -2. In **Services** under **Application Workloads**, select **kubesphere-devops-system** from the drop-down list and click **s2ioperator-trigger-service** to go to its detail page. +2. In **Services** under **Application Workloads**, select **kubesphere-devops-system** from the drop-down list and click **s2ioperator-trigger-service** to go to its details page. - ![s2i-trigger-service](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-service.png) +3. Click **More** and select **Edit External Access**. -3. Click **More** and select **Edit Internet Access**. - - ![edit-trigger-service](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/edit-trigger-service.png) - -4. In the window that appears, select **NodePort** from the drop-down list for **Access Method** and then click **OK**. - - ![select-nodeport](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/select-nodeport.png) +4. In the displayed dialog box, select **NodePort** from the drop-down list for **Access Method** and then click **OK**. {{< notice note >}} @@ -40,29 +34,19 @@ This tutorial demonstrates how to configure S2I and B2I webhooks. {{}} -5. You can view the **Node Port** on the detail page. It will be included in the S2I webhook URL. - - ![s2i-nodeport](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-nodeport.png) +5. 
You can view the **NodePort** on the details page. It is going to be included in the S2I webhook URL. ### Step 2: Configure an S2I webhook 1. Log out of KubeSphere and log back in as `project-regular`. Go to `demo-project`. -2. In **Image Builder**, click the S2I Image Builder to go to its detail page. +2. In **Image Builders**, click the S2I Image Builder to go to its details page. - ![click-s2i](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-s2i.png) - -3. You can see an auto-generated link shown in **Remote Trigger Link**. Copy `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` as it will be included in the S2I webhook URL. - - ![s2i-trigger-link](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-link.png) +3. You can see an auto-generated link shown in **Remote Trigger**. Copy `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` as it is going to be included in the S2I webhook URL. 4. Log in to your GitHub account and go to the source code repository used for the S2I Image Builder. Go to **Webhooks** under **Settings** and then click **Add webhook**. - ![click-add-webhook](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-add-webhook.png) - -5. In **Payload URL**, enter `http://:/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`. You can select trigger events based on your needs and then click **Add webhook**. This tutorial chooses **Just the push event** for demonstration purposes. - - ![add-payload-url](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/add-payload-url.png) +5. In **Payload URL**, enter `http://:/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`. You can select trigger events based on your needs and then click **Add webhook**. 
This tutorial selects **Just the push event** for demonstration purposes. {{< notice note >}} @@ -72,11 +56,7 @@ This tutorial demonstrates how to configure S2I and B2I webhooks. 6. Once the webhook is added, you can click the webhook to view delivery details in **Recent Deliveries**. You can see a green tick if the Payload URL is valid. - ![webhook-delivery](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/webhook-delivery.png) - -7. After you finish all the above operations, the S2I Image Builder will be automatically triggered if there is a push event to the source code repository. - - ![s2i-auto-build](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-auto-build.png) +7. After you finish all the above operations, the S2I image builder will be automatically triggered if there is a push event to the source code repository. ## Configure a B2I Webhook @@ -84,14 +64,10 @@ You can follow the same steps to configure a B2I webhook. 1. Expose the S2I trigger Service. -2. View the **Remote Trigger Link** in the detail page of your B2I Image Builder. - - ![b2i-trigger-link](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-trigger-link.png) +2. View the **Remote Trigger** on the details page of your B2I image builder. 3. Add the payload URL in the source code repository. The B2I payload URL format is the same as that of S2I payload URL. - ![b2i-payload-url](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-payload-url.png) - {{< notice note >}} You may need to configure necessary port forwarding rules and open the port in your security groups depending on where your Kubernetes cluster is deployed. @@ -100,7 +76,6 @@ You can follow the same steps to configure a B2I webhook. 4. The B2I Image Builder will be automatically triggered if there is a relevant event to the source code repository. 
- ![b2i-auto-build](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-auto-build.png) diff --git a/content/en/docs/project-user-guide/image-builder/s2i-templates.md b/content/en/docs/project-user-guide/image-builder/s2i-templates.md index e40414b01..ab9e6336e 100644 --- a/content/en/docs/project-user-guide/image-builder/s2i-templates.md +++ b/content/en/docs/project-user-guide/image-builder/s2i-templates.md @@ -37,7 +37,7 @@ The Image Builder is compatible with that of OpenShift, and you can reuse it in 1. The [S2I command line tool](https://github.com/openshift/source-to-image/releases) provides an easy-to-use command to initialize a base directory structure required by the Builder. Run the following commands to install the S2I CLI. ```bash - $ wget https://github.com/openshift/source-to-image/releases/download/v1.1.14/source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ wget https://github.com/openshift/source-to-image/releases/download/v1.2.04/source-to-image-v1.1.14-874754de-linux-386.tar.gz $ tar -xvf source-to-image-v1.1.14-874754de-linux-386.tar.gz $ ls s2i source-to-image-v1.1.14-874754de-linux-386.tar.gz sti @@ -252,8 +252,6 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n You can access the Nginx application at `http://localhost:8080`. - ![access-nginx](/images/docs/project-user-guide/image-builder/s2i-templates/access-nginx.png) - ### Step 5: Push the image and create an S2I template Once you finish testing the S2I Image Builder locally, you can push the image to your custom image repository. You also need to create a YAML file as the S2I Builder template as follows. @@ -286,9 +284,7 @@ spec: s2ibuildertemplate.devops.kubesphere.io/nginx created ``` -2. You can find the customized S2I template available when you create an S2I build on KubeSphere. - - ![template-available](/images/docs/project-user-guide/image-builder/s2i-templates/template-available.png) +2. 
You can find the customized S2I template available in **Build Environment** when you create an S2I build on KubeSphere. ## S2I Template Parameters Definition diff --git a/content/en/docs/project-user-guide/image-builder/source-to-image.md b/content/en/docs/project-user-guide/image-builder/source-to-image.md index 0c7dbb75f..72a2ca5a1 100644 --- a/content/en/docs/project-user-guide/image-builder/source-to-image.md +++ b/content/en/docs/project-user-guide/image-builder/source-to-image.md @@ -16,16 +16,14 @@ This tutorial demonstrates how to use S2I to import source code of a Java sample - You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/) as S2I is integrated into it. - You need to create a [GitHub](https://github.com/) account and a [Docker Hub](http://www.dockerhub.com/) account. GitLab and Harbor are also supported. This tutorial uses a GitHub repository to provide the source code for building and pushes an image to Docker Hub. -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - Set a CI dedicated node for building images. This is not mandatory but recommended for the development and production environment as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/set-ci-node/). 
## Use Source-to-Image (S2I) ### Step 1: Fork the example repository -Log in to GitHub and fork the GitHub repository [devops-java-sample](https://github.com/kubesphere/devops-java-sample) to your personal GitHub account. - -![fork-repository](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/fork-repository.png) +Log in to GitHub and fork the GitHub repository [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) to your personal GitHub account. ### Step 2: Create Secrets @@ -41,11 +39,7 @@ You do not need to create the GitHub Secret if your forked repository is open to 1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**. - ![create-service](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-service.png) - -2. Choose **Java** under **Build a New Service from Source Code Repository**, name it `s2i-demo` and click **Next**. - - ![select-lang-type](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/select-lang-type.png) +2. Choose **Java** under **Create Service from Source Code**, name it `s2i-demo` and click **Next**. {{< notice note >}} @@ -55,89 +49,57 @@ You do not need to create the GitHub Secret if your forked repository is open to 3. On the **Build Settings** page, provide the following information accordingly and click **Next**. - ![build-settings](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-settings.png) - **Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type). **Build Environment**: Select **kubesphere/java-8-centos7:v2.1.0**. - **Code URL**: The source code repository address (currently support Git). You can specify the code branch and the relative path in the source code terminal. 
The URL supports HTTP and HTTPS. Paste the forked repository URL (your own repository address) into this field. + **Code Repository URL**: The source code repository address (currently support Git). You can specify the code branch and the relative path in the source code terminal. The URL supports HTTP and HTTPS. Paste the forked repository URL (your own repository address) into this field. - ![copy-repo-code](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/copy-repo-code.png) + **Code Repository Branch**: The branch that is used for image building. Enter `master` for this tutorial. You can enter `dependency` for a cache test. - **Branch**: The branch that is used for image building. Enter `master` for this tutorial. You can enter `dependency` for a cache test. + **Code Repository Key**: You do not need to provide any Secret for a public repository. Select the GitHub Secret if you want to use a private repository. - **Secret**: You do not need to provide any Secret for a public repository. Select the GitHub Secret if you want to use a private repository. + **Image Name**: Customize an image name. As this tutorial will push an image to Docker Hub, enter `dockerhub_username/s2i-sample`. `dockerhub_username` is your Docker ID and make sure it has the permission to push and pull images. - **imageName**: Customize an image name. As this tutorial will push an image to Docker Hub, enter `dockerhub_username/s2i-sample`. `dockerhub_username` is your Docker ID and make sure it has the permission to push and pull images. + **Image Tag**: The image tag. Enter `latest`. - **tag**: The image tag. Enter `latest`. - - **Target image repository**: Select the Docker Hub Secret as the image is pushed to Docker Hub. + **Target Image Registry**: Select the Docker Hub Secret as the image is pushed to Docker Hub. **Advanced Settings**: You can define the code relative path. Use the default `/` for this field. -4. 
On the **Container Settings** page, scroll down to **Service Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-1`), and enter `8080` for both **Container Port** and **Service Port**. +4. On the **Pod Settings** page, scroll down to **Port Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-1`), and enter `8080` for both **Container Port** and **Service Port**. - ![service-settings](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-settings.png) - -5. Scroll down to **Health Checker** and select it. Set a readiness probe by filling out the following parameters. Click **√** when you finish setting the probe and then click **Next** to continue. - - ![health-checker](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/health-checker.png) +5. Scroll down to **Health Check** and select it. Set **Readiness Check** by filling out the following parameters. Click **√** when you finish setting the probe and then click **Next** to continue. **HTTP Request**: Select **HTTP** as the protocol, enter `/` as the path (root path in this tutorial), and enter `8080` as the port exposed. - **Initial Delays**: The number of seconds after the container has started before the liveness probe is initiated. Enter `30` for this field. + **Initial Delay (s)**: The number of seconds after the container has started before the liveness probe is initiated. Enter `30` for this field. - **Timeouts**: The number of seconds after which the probe times out. Enter `10` for this field. + **Timeout (s)**: The number of seconds after which the probe times out. Enter `10` for this field. - For other fields, use the default value directly. 
For more information about how to configure probes and set other parameters on the **Container Settings** page, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/). + For other fields, use the default value directly. For more information about how to configure probes and set other parameters on the **Container Settings** page, see [Pod Settings](../../../project-user-guide/application-workloads/container-image-settings/). -6. On the **Mount Volumes** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue. +6. On the **Volume Settings** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue. -7. On the **Advanced Settings** page, select **Internet Access** and select **NodePort** as the access method. Click **Create** to finish the whole process. +7. On the **Advanced Settings** page, select **External Access** and select **NodePort** as the access method. Click **Create** to finish the whole process. - ![create-finish](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-finish.png) - -8. Click **Image Builder** from the navigation bar and you can see that the example image is being built. - - ![building](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/building.png) +8. Click **Image Builders** from the navigation bar and you can see that the example image is being built. ### Step 4: Check results -1. Wait for a while and you can see the status of the image has reached **Successful**. +1. Wait for a while and you can see the status of the image builder has reached **Successful**. - ![success-result](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/success-result.png) +2. 
Click this image builder to go to its details page. Under **Job Records**, click on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally. -2. Click this image to go to its detail page. Under **Job Records**, click on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally. - - ![build-log](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-log.png) - -3. Go back to the previous page, and you can see the corresponding Job, Deployment and Service of the image have been all created successfully. - - #### Service - - ![service](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service.png) - - #### Deployment - - ![deployment](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/deployment.png) - - #### Job - - ![job](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/job.png) +3. Go back to the **Services**, **Deployments**, and **Jobs** page, and you can see the corresponding Service, Deployment, and Job of the image have been all created successfully. 4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag. - ![docker-image](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/docker-image.png) - ### Step 5: Access the S2I Service -1. On the **Services** page, click the S2I Service to go to its detail page. +1. On the **Services** page, click the S2I Service to go to its details page. - ![service-detail](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-detail.png) - -2. To access the Service, you can either use the endpoint with the `curl` command or visit `:`. For example: +2. 
To access the Service, you can either use the endpoint with the `curl` command or visit `<NodeIP>:<NodePort>`. For example:
On the details page, select **Create Snapshot** from the **More** drop-down menu. +3. In the displayed dialog box, set a name for the snapshot which serves as a unique identifier and select a **Volume Snapshot Class**. Click **OK** to finish. -4. Newly-created snapshots will appear on the **Volume Snapshots** list. +4. Newly-created snapshots is displayed on the **Volume Snapshots** list. ## Use a Snapshot to Create a Volume There are two ways for you to use a snapshot to create a volume. -### Create a volume from the snapshot detail page +### Create a volume from the snapshot details page -1. Log in to the web console of KubeSphere as `project-regular`. On a snapshot's detail page, click **Apply** to use the snapshot. Generally, the steps are the same as creating a volume directly. +1. Log in to the web console of KubeSphere as `project-regular`. On a snapshot's details page, click **Apply** to use the snapshot. Generally, the steps are the same as creating a volume directly. - ![apply-volume](/images/docs/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg) - -2. In the dialog that appears, set a name for the volume. Click **Next** to continue. +2. In the displayed dialog box, set a name for the volume. Click **Next** to continue. {{< notice note >}} @@ -56,10 +54,10 @@ There are two ways for you to use a snapshot to create a volume. 1. Log in to the web console of KubeSphere as `project-regular`. On the **Volumes** page of a project, click **Create**. -2. In the dialog that appears, set a name for the volume. Click **Next** to continue. +2. In the displayed dialog box, set a name for the volume. Click **Next** to continue. -3. On the **Volume Settings** tab, select **Create a volume by VolumeSnapshot** under the **Method** section. Select a snapshot and an access mode, and click **Next** to continue. +3. On the **Volume Settings** tab, select **From Volume Snapshot** under the **Method** section. 
Select a snapshot and an access mode, and click **Next** to continue. 4. On the **Advanced Settings** tab, add metadata for the volume such as labels and annotations. Click **Create** to finish creating the volume. -5. You can see the volume created appear on the **Volumes** page. \ No newline at end of file +5. The volume created is displayed on the **Volumes** page. \ No newline at end of file diff --git a/content/en/docs/project-user-guide/storage/volumes.md b/content/en/docs/project-user-guide/storage/volumes.md index ce1b280c9..e08f47320 100644 --- a/content/en/docs/project-user-guide/storage/volumes.md +++ b/content/en/docs/project-user-guide/storage/volumes.md @@ -1,7 +1,7 @@ --- title: "Volumes" -keywords: 'Kubernetes, persistent volumes, persistent volume claims, volume clone, volume snapshot, volume expanding' -description: 'Learn how to create, edit, and mount a volume in KubeSphere.' +keywords: 'Kubernetes, Persistent Volumes, Persistent Volume Claims, Volume Clone, Volume Snapshot, Volume Expansion' +description: 'Learn how to create, edit, and mount a volume on KubeSphere.' linkTitle: "Volumes" weight: 10310 --- @@ -10,11 +10,11 @@ When you create an application workload in a project, you can create a [Persiste Cluster administrators configure PersistentVolumes using storage classes. In other words, to create a PersistentVolumeClaim in a project, your cluster must have an available storage class. If no customized storage class is configured when you install KubeSphere, [OpenEBS](https://openebs.io/) is installed in your cluster by default to provide Local Persistent Volumes. However, it does not support dynamic volume provisioning. In a production environment, it is recommended you configure storage classes in advance to provide persistent storage services for your apps. -This tutorial demonstrates how to create a volume, mount a volume and use volume features from its detail page. 
+This tutorial demonstrates how to create a volume, mount a volume and use volume features from its details page. ## Prerequisites -- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace, a project and a user (`project-regular`). The user must be invited to the project with the role of `operator`. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). - If you want to dynamically provision a volume, you need to [configure a storage class](../../../cluster-administration/persistent-volume-and-storage-class/) that supports dynamic provisioning. @@ -26,31 +26,29 @@ All the volumes that are created on the **Volumes** page are PersistentVolumeCla 2. To create a volume, click **Create** on the **Volumes** page. -3. In the dialog that appears, set a name (for example, `demo-volume`) for the volume and click **Next**. +3. In the displayed dialog box, set a name (for example, `demo-volume`) for the volume and click **Next**. {{< notice note >}} - You can see the volume's manifest file in YAML format by enabling **Edit Mode** in the top-right corner. KubeSphere allows you to edit the manifest file directly to create a volume. Alternatively, you can follow the steps below to create a volume via the dashboard. + You can see the volume's manifest file in YAML format by enabling **Edit YAML** in the upper-right corner. KubeSphere allows you to edit the manifest file directly to create a volume. Alternatively, you can follow the steps below to create a volume via the dashboard. {{}} -4. On the **Volume Settings** page, select a method to create a volume. +4. On the **Storage Settings** page, select a method to create a volume. 
- - **Create a volume by StorageClass**. You can configure storage classes both [before](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) and [after](../../../cluster-administration/persistent-volume-and-storage-class/) the installation of KubeSphere. + - **From Storage Class**. You can configure storage classes both [before](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/) and [after](../../../cluster-administration/persistent-volume-and-storage-class/) the installation of KubeSphere. - - **Create a volume by VolumeSnapshot**. To use a snapshot to create a volume, you must create a volume snapshot first. + - **From Volume Snapshot**. To use a snapshot to create a volume, you must create a volume snapshot first. - Select **Create a volume by StorageClass** in this example. For more information about how to create a volume by snapshot, see [Volume Snapshots](../volume-snapshots/). + Select **From Storage Class** in this example. For more information about how to create a volume by snapshot, see [Volume Snapshots](../volume-snapshots/). 5. Select a storage class from the drop-down list. This tutorial uses `csi-standard`, a standard storage class provided by QingCloud Platform. You can select your own storage class. - ![select-storage-class](/images/docs/project-user-guide/volume-management/volumes/select-storage-class.jpg) - 6. Depending on the storage class you select, you may see different access modes in this section as some PersistentVolumes only support specific access modes. In total, there are three access modes. - - **ReadWriteOnce (RWO)**: The volume can be mounted as read-write by a single node. - - **ReadOnlyMany (ROX)**: The volume can be mounted as read-only by many nodes. - - **ReadWriteMany (RWX)**: The volume can be mounted as read-write by many nodes. + - **ReadWriteOnce**: The volume can be mounted as read-write by a single node. 
+ - **ReadOnlyMany**: The volume can be mounted as read-only by many nodes. + - **ReadWriteMany**: The volume can be mounted as read-write by many nodes. Select the desired access mode. @@ -60,24 +58,18 @@ All the volumes that are created on the **Volumes** page are PersistentVolumeCla 9. Click **Create** to finish creating a volume. -10. A created volume displays on the **Volumes** page in a project. After it is mounted to a workload, it will turn to **Mounted** under the **Mount** column. - - ![volume-status](/images/docs/project-user-guide/volume-management/volumes/volume-status.jpg) +10. A created volume displays on the **Volumes** page in a project. After it is mounted to a workload, it will turn to **Mounted** under the **Mount Status** column. {{< notice note >}} -Newly-created volumes will also appear on the **Volumes** page in **Cluster Management**. Generally, this section is not available to project users such as `project-regular`. Cluster administrators have the responsibility to view and keep track of created volumes in a project. Conversely, if a cluster administrator creates a volume for a project in **Cluster Management**, the volume also appears on the **Volumes** page in a project. +Newly-created volumes are also displayed on the **Volumes** page in **Cluster Management**. Project users such as `project-regular` can view volume instances under the **Volume Instance** column. Cluster administrators have the responsibility to view and keep track of created volumes in a project. Conversely, if a cluster administrator creates a volume for a project in **Cluster Management**, the volume is also displayed on the **Volumes** page in a project. {{}} 11. For some volumes, you can see the status reach **Bound** from **Pending** immediately after they are created as they are provisioned dynamically. For volumes that remain in the **Pending** status, they will turn to **Bound** once they are mounted to a workload.
The difference is decided by the storage class of the volume. - ![local-pending](/images/docs/project-user-guide/volume-management/volumes/local-pending.jpg) - For example, if you install KubeSphere with the default storage class (OpenEBS), you can only create local volumes, which means dynamic provisioning is not supported. This is specified by the `volumeBindingMode` field which is set to `WaitForFirstConsumer`. - ![volumebindingmode](/images/docs/project-user-guide/volume-management/volumes/volumebindingmode.jpg) - ## Mount a Volume When you create application workloads, such as [Deployments](../../../project-user-guide/application-workloads/deployments/), [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/) and [DaemonSets](../../../project-user-guide/application-workloads/daemonsets/), you can mount volumes to them. @@ -88,15 +80,13 @@ This tutorial does not explain how to create workloads. For more information, se {{}} -On the **Mount Volumes** page, you can see there are different volumes that you can mount to your workload. - -![volume-page](/images/docs/project-user-guide/volume-management/volumes/volume-page.jpg) +On the **Volume Settings** page, you can see there are different volumes that you can mount to your workload. - **Add Volume Template** (Only available to [StatefulSets](../../../project-user-guide/application-workloads/statefulsets/)): A volume template is used to dynamically create a PVC. Mount the PVC of the StorageClass type to the Pod by setting the name, storage class, access mode, capacity and path, which are all indicated by the field `volumeClaimTemplates`. -- **Add Volume**: Support emptyDir volumes and PVCs. +- **Mount Volume**: Support emptyDir volumes and PVCs. - In **Add Volume**, there are 3 kinds of volumes: + In **Mount Volume**, there are 3 kinds of volumes: - **Existing Volume**: Use a PVC to mount. 
@@ -106,7 +96,7 @@ On the **Mount Volumes** page, you can see there are different volumes that you The temporary storage volume represents [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), which is first created when a Pod is assigned to a node, and exists as long as that Pod is running on that node. An emptyDir volume offers an empty directory from which containers in the Pod can read and write. Depending on your deployment environment, an emptyDir volume can be stored on any medium that is backing the node, which could be a disk or SSD. When the Pod is removed from the node for any reason, the data in the emptyDir is deleted forever. - - **HostPath**: Use a hostPath volume to mount. + - **HostPath Volume**: Use a hostPath volume to mount. A HostPath volume mounts a file or directory from the host node's filesystem into your Pod. This is not something that most Pods will need, but it offers a powerful escape hatch for some applications. For more information, refer to [the Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath). @@ -126,11 +116,9 @@ After a volume is created, you can see detailed information of it, edit it, or l ### Edit a volume -On the detail page, you can click **Edit Information** to change its basic information. Click **More** and you can edit its YAML file or delete this volume. +On the details page, you can click **Edit Information** to change its basic information. Click **More** and you can edit its YAML file or delete this volume. -To delete a volume, make sure the volume is not mounted to any workload. To unmount a volume, go to the detail page of a workload. From the **More** drop-down list, click **Edit Config Template**. Select **Volume** from the pop-up window, and click the dustbin icon to unmount it. - -![delete-volume](/images/docs/project-user-guide/volume-management/volumes/delete-volume.jpg) +To delete a volume, make sure the volume is not mounted to any workload. 
To unmount a volume, go to the details page of a workload. From the **More** drop-down list, click **Edit Settings**. Select **Volumes** from the pop-up window, and click the dustbin icon to unmount it. If the status of a volume remains **Terminating** for a long time after you clicked **Delete**, manually delete it by using the following command: @@ -142,11 +130,9 @@ kubectl patch pvc -p '{"metadata":{"finalizers":null}}' From the **More** drop-down menu, there are three additional options provided by KubeSphere based on the underlying storage plugin, also known as `Storage Capability`. Volume features include: -- Clone a volume: Create a same volume. -- Create a volume snapshot: Create a volume snapshot which can be used to create volumes. For more information, see [Volume Snapshots](../volume-snapshots/). -- Expand a volume: Increase the size of a volume. Keep in mind that you cannot reduce the size of a volume on the console due to possible data loss. - -![volume-detail-page](/images/docs/project-user-guide/volume-management/volumes/volume-detail-page.jpg) +- **Clone**: Create a same volume. +- **Create Snapshot**: Create a volume snapshot which can be used to create volumes. For more information, see [Volume Snapshots](../volume-snapshots/). +- **Expand**: Increase the size of a volume. Keep in mind that you cannot reduce the size of a volume on the console due to possible data loss. For more information about `Storage Capability`, see [Design Documentation](https://github.com/kubesphere/community/blob/master/sig-storage/concepts-and-designs/storage-capability-interface.md). @@ -160,6 +146,4 @@ Some in-tree or special CSI plugins may not be covered by `Storage Capability`. KubeSphere retrieves metric data of PVCs with `Filesystem` mode from Kubelet to monitor volumes including capacity usage and inode usage. 
-![volume-monitoring](/images/docs/project-user-guide/volume-management/volumes/volume-monitoring.jpg) - For more information about volume monitoring, see [Research on Volume Monitoring](https://github.com/kubesphere/kubesphere/issues/2921). diff --git a/content/en/docs/quick-start/all-in-one-on-linux.md b/content/en/docs/quick-start/all-in-one-on-linux.md index de7db307a..3daec1c96 100644 --- a/content/en/docs/quick-start/all-in-one-on-linux.md +++ b/content/en/docs/quick-start/all-in-one-on-linux.md @@ -17,7 +17,7 @@ For those who are new to KubeSphere and looking for a quick way to discover the To get started with all-in-one installation, you only need to prepare one host according to the following requirements for hardware and operating system. -### Hardware Recommendations +### Hardware recommendations @@ -151,7 +151,7 @@ Perform the following steps to download KubeKey. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or run the following command: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -167,7 +167,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -182,7 +182,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -203,12 +203,12 @@ You only need to run one command for all-in-one installation. 
The template is as To create a Kubernetes cluster with KubeSphere installed, refer to the following command as an example: ```bash -./kk create cluster --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk create cluster --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8 and v1.20.4. If you do not specify a Kubernetes version, KubeKey installs Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). +- Recommended Kubernetes versions for KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x and v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey installs Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). - For all-in-one installation, you do not need to change any configuration. - If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed. KubeKey will install Kubernetes only. If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed. - KubeKey will install [OpenEBS](https://openebs.io/) to provision LocalPV for the development and testing environment by default, which is convenient for new users. For other storage classes, see [Persistent Storage Configurations](../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/). @@ -259,9 +259,7 @@ You may need to configure port forwarding rules and open the port in your securi {{}} -After logging in to the console, you can check the status of different components in **Components**. You may need to wait for some components to be up and running if you want to use related services. 
You can also use `kubectl get pod --all-namespaces` to inspect the running status of KubeSphere workloads. - -![kubesphere-components](/images/docs/quickstart/all-in-one-installation/kubesphere-components.png) +After logging in to the console, you can check the status of different components in **System Components**. You may need to wait for some components to be up and running if you want to use related services. You can also use `kubectl get pod --all-namespaces` to inspect the running status of KubeSphere workloads. ## Enable Pluggable Components (Optional) diff --git a/content/en/docs/quick-start/create-workspace-and-project.md b/content/en/docs/quick-start/create-workspace-and-project.md index e2685b81d..9d1817fb5 100644 --- a/content/en/docs/quick-start/create-workspace-and-project.md +++ b/content/en/docs/quick-start/create-workspace-and-project.md @@ -1,8 +1,8 @@ --- -title: "Create Workspaces, Projects, Accounts and Roles" -keywords: 'KubeSphere, Kubernetes, Multi-tenant, Workspace, Account, Role, Project' +title: "Create Workspaces, Projects, Users and Roles" +keywords: 'KubeSphere, Kubernetes, Multi-tenant, Workspace, User, Role, Project' description: 'Take advantage of the multi-tenant system of KubeSphere for fine-grained access control at different levels.' -linkTitle: "Create Workspaces, Projects, Accounts and Roles" +linkTitle: "Create Workspaces, Projects, Users and Roles" weight: 2300 --- @@ -22,9 +22,9 @@ You can create multiple workspaces within a KubeSphere cluster. Under each works ## Hands-on Lab -### Step 1: Create an account +### Step 1: Create a user -After KubeSphere is installed, you need to add different users with varied roles to the platform so that they can work at different levels on various resources. Initially, you only have one default account, which is `admin`, granted the role `platform-admin`. In the first step, you create an account `user-manager` and further create more accounts as `user-manager`. 
+After KubeSphere is installed, you need to add different users with varied roles to the platform so that they can work at different levels on various resources. Initially, you only have one default user, which is `admin`, granted the role `platform-admin`. In the first step, you create a user `user-manager` and further create more users as `user-manager`. 1. Log in to the web console as `admin` with the default account and password (`admin/P@88w0rd`). @@ -32,7 +32,7 @@ After KubeSphere is installed, you need to add different users with varied roles For account security, it is highly recommended that you change your password the first time you log in to the console. To change your password, select **User Settings** in the drop-down list in the upper-right corner. In **Password Settings**, set a new password. You also can change the console language in **User Settings**. {{}} -2. Click **Platform** in the upper-left corner, and then select **Access Control**. In the left nevigation pane, select **Account Roles**. There are four built-in roles as shown in the following table. +2. Click **Platform** in the upper-left corner, and then select **Access Control**. In the left navigation pane, select **Platform Roles**. There are four built-in roles as shown in the following table.
@@ -64,11 +64,9 @@ After KubeSphere is installed, you need to add different users with varied roles Built-in roles are created automatically by KubeSphere and cannot be edited or deleted. {{}} -3. In **Accounts**, click **Create**. In the displayed dialog box, provide all the necessary information (marked with *) and select `users-manager` for **Role**. Refer to the following image as an example. +3. In **Users**, click **Create**. In the displayed dialog box, provide all the necessary information (marked with *) and select `users-manager` for **Role**. Refer to the following image as an example. - ![create-account](/images/docs/quickstart/create-workspaces-projects-accounts/create-account.png) - - Click **OK** after you finish. The new account will display on the **Accounts** page. + Click **OK** after you finish. The new account will display on the **Users** page. 4. Log out of the console and log back in with the account `user-manager` to create four accounts that will be used in other tutorials. @@ -106,8 +104,6 @@ After KubeSphere is installed, you need to add different users with varied roles 5. Verify the four accounts created. - ![account-list](/images/docs/quickstart/create-workspaces-projects-accounts/account-list.png) - ### Step 2: Create a workspace In this step, you create a workspace using the account `ws-manager` created in the previous step. As the basic logic unit for the management of projects, DevOps projects and organization members, workspaces underpin the multi-tenant system of KubeSphere. @@ -122,7 +118,7 @@ In this step, you create a workspace using the account `ws-manager` created in t {{}} -3. Log out of the console and log back in as `ws-admin`. In **Workspace Settings**, select **Workspace Members** and click **Invite Member**. +3. Log out of the console and log back in as `ws-admin`. In **Workspace Settings**, select **Workspace Members** and click **Invite**. 4. Invite both `project-admin` and `project-regular` to the workspace. 
Assign them the role `workspace-self-provisioner` and `workspace-viewer` respectively and click **OK**. @@ -130,9 +126,7 @@ In this step, you create a workspace using the account `ws-manager` created in t The actual role name follows a naming convention: `-`. For example, in this workspace named `demo-workspace`, the actual role name of the role `viewer` is `demo-workspace-viewer`. {{}} - ![invite-member](/images/docs/quickstart/create-workspaces-projects-accounts/invite-member.png) - -5. In **Workspace Members**, you can see three members listed. +5. After you add both `project-admin` and `project-regular` to the workspace, click **OK**. In **Workspace Members**, you can see three members listed.
@@ -162,51 +156,45 @@ The actual role name follows a naming convention: `-` In this step, you create a project using the account `project-admin` created in the previous step. A project in KubeSphere is the same as a namespace in Kubernetes, which provides virtual isolation for resources. For more information, see [Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). -1. Log in to the KubeSphere web console as `project-admin`. In **Projects**, click **Create**. +1. Log in to KubeSphere as `project-admin`. In **Projects**, click **Create**. 2. Enter the project name (for example, `demo-project`) and click **OK** to finish. You can also add an alias and description for the project. -3. In **Projects**, click the project name to view its details page. +3. In **Projects**, click the project created just now to view its detailed information. -4. On the **Overview** page of the project, the project quota remains unset by default. You can click **Set** and specify [resource requests and limits](../../workspace-administration/project-quotas/) as needed (for example, 1 Core for CPU and 1000 Gi for memory). +4. On the **Overview** page of the project, the project quota remains unset by default. You can click **Edit Quotas** and specify [resource requests and limits](../../workspace-administration/project-quotas/) as needed (for example, 1 core for CPU and 1000Gi for memory). - ![project-quota](/images/docs/quickstart/create-workspaces-projects-accounts/project-quota.png) - -5. Invite `project-regular` to this project and grant this user the role `operator`. Refer to the following image for specific steps. - - ![invite-project-regular](/images/docs/quickstart/create-workspaces-projects-accounts/invite-project-regular.png) +5. Invite `project-regular` to this project and grant this user the role `operator`. 
{{< notice info >}} The user granted the role `operator` is a project maintainer who can manage resources other than users and roles in the project. {{}} -6. Before creating a [Route](../../project-user-guide/application-workloads/routes/) which is [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) in Kubernetes, you need to enable a gateway for this project. The gateway is an [NGINX Ingress controller](https://github.com/kubernetes/ingress-nginx) running in the project. To set a gateway, go to **Advanced Settings** in **Project Settings** and click **Set Gateway**. The account `project-admin` is still used in this step. +6. Before creating a [Route](../../project-user-guide/application-workloads/routes/) which is [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) in Kubernetes, you need to enable a gateway for this project. The gateway is an [NGINX Ingress controller](https://github.com/kubernetes/ingress-nginx) running in the project. To set a gateway, go to **Gateway Settings** in **Project Settings** and click **Enable Gateway**. The account `project-admin` is still used in this step. -7. Select the access method **NodePort** and click **Save**. +7. Select the access method **NodePort** and click **OK**. -8. Under **Internet Access**, you can obtain the Gateway Address and the NodePort of http and https in the list. +8. Under **Project Gateway**, you can obtain the Gateway Address and the NodePort of http and https in the list. {{< notice note >}} If you want to expose services using the type `LoadBalancer`, you need to use the LoadBalancer plugin of cloud providers. If your Kubernetes cluster is running in a bare metal environment, it is recommended that you use [OpenELB](https://github.com/kubesphere/openelb) as the LoadBalancer plugin. 
{{}} - ![nodeport-setting](/images/docs/quickstart/create-workspaces-projects-accounts/nodeport-setting.png) - ### Step 4: Create a role After you finish the above steps, you know that users can be granted different roles at different levels. The roles used in previous steps are all built-in ones created by KubeSphere. In this step, you will learn how to define a customized role to meet the needs in your work. 1. Log in to the KubeSphere web console as `admin` again and go to **Access Control**. -2. Click **Account Roles** on the left navigation pane, and then click **Create** on the right. +2. Click **Platform Roles** on the left navigation pane, and then click **Create** on the right. {{< notice note >}} - The preset roles on the **Account Roles** page cannot be edited and deleted. + The preset roles on the **Platform Roles** page cannot be edited and deleted. {{}} -3. In the **Create Account Role** dialog box, set the name (for example, `clusters-admin`), alias, and description of the role, and click **Edit Permissions**. +3. In the **Create Platform Role** dialog box, set the name (for example, `clusters-admin`), alias, and description of the role, and click **Edit Permissions**. {{< notice note >}} @@ -225,9 +213,9 @@ After you finish the above steps, you know that users can be granted different r {{}} -5. On the **Account Roles** page, you can click the name of the created role to view the role details and click to edit the role, edit the role permissions, or delete the role. +5. On the **Platform Roles** page, you can click the name of the created role to view the role details and click to edit the role, edit the role permissions, or delete the role. -6. On the **Accounts** page, you can assign the role to an account when you create an account or edit an existing account. +6. On the **Users** page, you can assign the role to an account when you create an account or edit an existing account. 
### Step 5: Create a DevOps project (Optional) @@ -240,12 +228,10 @@ To create a DevOps project, you must install the KubeSphere DevOps system in adv 1. Log in to the console as `project-admin`. In **DevOps Projects**, click **Create**. -2. Set a name for the DevOps project (for example, `demo-devops`) and click **OK**. You can also add an alias and description for the project. +2. Enter the DevOps project name (for example, `demo-devops`) and click **OK**. You can also add an alias and description for the project. -3. Click the project name to view its details page. +3. In **DevOps Projects**, click the project created just now to view its detailed information. -4. Go to **Project Management** and select **Project Members**. Click **Invite Member** to grant `project-regular` the role `operator`, who is allowed to create pipelines and credentials. Click **OK** to finish. - - ![invite-devops-member](/images/docs/quickstart/create-workspaces-projects-accounts/invite-devops-member.png) +4. Go to **Project Management** and select **Project Members**. Click **Invite** to grant `project-regular` the role of `operator`, who is allowed to create pipelines and credentials. You are now familiar with the multi-tenant management system of KubeSphere. In other tutorials, the account `project-regular` will also be used to demonstrate how to create applications and resources in a project or DevOps project. diff --git a/content/en/docs/quick-start/deploy-bookinfo-to-k8s.md b/content/en/docs/quick-start/deploy-bookinfo-to-k8s.md index c3f34d344..27d6aa122 100644 --- a/content/en/docs/quick-start/deploy-bookinfo-to-k8s.md +++ b/content/en/docs/quick-start/deploy-bookinfo-to-k8s.md @@ -16,7 +16,7 @@ To provide consistent user experiences of managing microservices, KubeSphere int - You need to enable [KubeSphere Service Mesh](../../pluggable-components/service-mesh/). 
-- You need to finish all tasks in [Create Workspaces, Projects, Accounts and Roles](../create-workspace-and-project/). +- You need to finish all tasks in [Create Workspaces, Projects, Users and Roles](../create-workspace-and-project/). - You need to enable **Application Governance**. For more information, see [Set a Gateway](../../project-administration/project-gateway/#set-a-gateway). @@ -44,9 +44,7 @@ The following figure shows the end-to-end architecture of the application. For m 1. Log in to the console as `project-regular` and go to your project (`demo-project`). Go to **Apps** under **Application Workloads**, and then click **Deploy Sample App** on the right of the page. -2. Click **Next** in the displayed dialog box where required fields are pre-populated and relevant components are already set. You do not need to change the settings and just click **Create** on the final page (**Internet Access**). - - ![create-bookinfo](/images/docs/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png) +2. Click **Next** in the displayed dialog box where required fields are pre-populated and relevant components are already set. You do not need to change the settings and just click **Create** on the final page. {{< notice note >}} @@ -54,24 +52,20 @@ KubeSphere creates the hostname automatically. To change the hostname, hover ove {{}} -3. In **Workloads**, verify that the statuses of all four Deployments reach `Running`, which means the app has been created successfully. - - ![running](/images/docs/quickstart/deploy-bookinfo-to-k8s/running.png) +1. In **Workloads**, verify that the statuses of all four Deployments reach `Running`, which means the app has been created successfully. {{< notice note >}}It may take a few minutes before the Deployments are up and running. {{}} ### Step 2: Access Bookinfo -1. In **Apps**, go to **Composing Apps** and click the app `bookinfo` to see its details page. +1. 
In **Apps**, go to **Composed Apps** and click the app `bookinfo` to see its details page. {{< notice note >}}If you do not see the app in the list, refresh your page. {{}} 2. On the details page, record the hostname and port number of the app which will be used to access Bookinfo. - ![detail-page](/images/docs/quickstart/deploy-bookinfo-to-k8s/detail-page.png) - 3. As the app will be accessed outside the cluster through a NodePort, you need to open the port in your security group for outbound traffic and set port forwarding rules if necessary. 4. Edit your local host file (`/etc/hosts`) by adding an entry in it to map the hostname to the IP address. For example: @@ -85,12 +79,10 @@ KubeSphere creates the hostname automatically. To change the hostname, hover ove Do not copy the preceding content to your local host file. Replace it with your own IP address and hostname. {{}} -5. When you finish, click to access the app. +5. When you finish, click **Access Service** to access the app. 6. On the app details page, click **Normal user** in the lower-left corner. - ![normal-user](/images/docs/quickstart/deploy-bookinfo-to-k8s/normal-user.png) - 7. In the following figure, you can notice that only **Reviewer1** and **Reviewer2** are displayed without any stars in the **Book Reviews** section. This is the status of this app version. To explore more features of traffic management, you can implement a [canary release](../../project-user-guide/grayscale-release/canary-release/) for this app. 
![ratings-page](/images/docs/quickstart/deploy-bookinfo-to-k8s/ratings-page.png) diff --git a/content/en/docs/quick-start/enable-pluggable-components.md b/content/en/docs/quick-start/enable-pluggable-components.md index 93f5de5fa..041a51d55 100644 --- a/content/en/docs/quick-start/enable-pluggable-components.md +++ b/content/en/docs/quick-start/enable-pluggable-components.md @@ -20,7 +20,7 @@ This tutorial demonstrates how to enable pluggable components of KubeSphere both | `kubeedge` | KubeEdge | Add edge nodes to your cluster and run workloads on them. | | `openpitrix` | KubeSphere App Store | Provide an app store for Helm-based applications and allow users to manage apps throughout the entire lifecycle. | | `servicemesh` | KubeSphere Service Mesh (Istio-based) | Provide fine-grained traffic management, observability and tracing, and visualized traffic topology. | -| `ippool` | Pod IP Pool | Create Pod IP Pools and assign IP addresses from the Pools to your Pods. | +| `ippool` | Pod IP Pool | Create Pod IP pools and assign IP addresses from the Pools to your Pods. | | `topology` | Service Topology | Integrate [Weave Scope](https://www.weave.works/oss/scope/) to view service-to-service communication (topology) of your apps and containers. | For more information about each component, see [Overview of Enable Pluggable Components](../../pluggable-components/overview/). @@ -32,7 +32,7 @@ For more information about each component, see [Overview of Enable Pluggable Com {{}} -## Enable Pluggable Components before Installation +## Enable Pluggable Components Before Installation For most of the pluggable components, you can follow the steps below to enable them. If you need to enable [KubeEdge](../../pluggable-components/kubeedge/), [Pod IP Pools](../../pluggable-components/pod-ip-pools/) and [Service Topology](../../pluggable-components/service-topology/), refer to the corresponding tutorials directly. 
@@ -50,7 +50,7 @@ When you implement multi-node installation of KubeSphere on Linux, you need to c If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a `config-sample.yaml` file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (for example, for testing purpose), refer to the [following section](#enable-pluggable-components-after-installation) to see how pluggable components can be installed after installation. {{}} -2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [the complete file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) for your reference. Save the file after you finish. +2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [the complete file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) for your reference. Save the file after you finish. 3. Create a cluster using the configuration file: @@ -73,16 +73,14 @@ When you install KubeSphere on Kubernetes, you need to use [ks-installer](https: 3. Save this local file and execute the following commands to start installation. ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -Whether you install KubeSphere on Linux or on Kubernetes, you can check the status of the components you have enabled in the web console of KubeSphere after installation. 
Go to **Components**, and you can see an image below: +Whether you install KubeSphere on Linux or on Kubernetes, you can check the status of the components you have enabled in the web console of KubeSphere after installation. Go to **System Components**, and you can see the component status. -![component-status](/images/docs/quickstart/enable-pluggable-components/component-status.png) - -## Enable Pluggable Components after Installation +## Enable Pluggable Components After Installation The KubeSphere web console provides a convenient way for users to view and operate on different resources. To enable pluggable components after installation, you only need to make few adjustments on the console directly. For those who are accustomed to the Kubernetes command-line tool, kubectl, they will have no difficulty in using KubeSphere as the tool is integrated into the console. @@ -100,9 +98,9 @@ If you need to enable [KubeEdge](../../pluggable-components/kubeedge/), [Pod IP A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects. {{}} -3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**. +3. In **Custom Resources**, click the three dots on the right of `ks-installer` and select **Edit YAML**. -4. In this YAML file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. After you finish, click **Update** to save the configuration. +4. In this YAML file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. After you finish, click **OK** to save the configuration. 5. 
You can use the web kubectl to check the installation process by executing the following command: @@ -140,9 +138,7 @@ You can find the web kubectl tool by clicking the hammer icon in the bottom-righ ##################################################### ``` -7. In **Components**, you can see the status of different components. - - ![component-status-page](/images/docs/quickstart/enable-pluggable-components/component-status-page.png) +7. In **System Components**, you can see the status of different components. {{< notice tip >}} diff --git a/content/en/docs/quick-start/minimal-kubesphere-on-k8s.md b/content/en/docs/quick-start/minimal-kubesphere-on-k8s.md index 94e20b2d1..23de75e71 100644 --- a/content/en/docs/quick-start/minimal-kubesphere-on-k8s.md +++ b/content/en/docs/quick-start/minimal-kubesphere-on-k8s.md @@ -11,7 +11,7 @@ In addition to installing KubeSphere on a Linux machine, you can also deploy it ## Prerequisites -- To install KubeSphere v3.1.1 on Kubernetes, your Kubernetes version must be v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- To install KubeSphere 3.2.1 on Kubernetes, your Kubernetes version must be v1.19.x, v1.20.x, v1.21.x or v1.22.x (experimental). - Make sure your machine meets the minimal hardware requirement: CPU > 1 Core, Memory > 2 GB. - A **default** Storage Class in your Kubernetes cluster needs to be configured before the installation. @@ -33,9 +33,9 @@ After you make sure your machine meets the conditions, perform the following ste 1. 
Run the following commands to start installation: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 2. After KubeSphere is successfully installed, you can run the following command to view the installation logs: @@ -52,9 +52,7 @@ After you make sure your machine meets the conditions, perform the following ste 4. Make sure port `30880` is opened in your security group and access the web console through the NodePort (`IP:30880`) with the default account and password (`admin/P@88w0rd`). -5. After logging in to the console, you can check the status of different components in **Components**. You may need to wait for some components to be up and running if you want to use related services. - - ![kubesphere-components](/images/docs/quickstart/minimal-installation-on-k8s/kubesphere-components.png) +5. After logging in to the console, you can check the status of different components in **System Components**. You may need to wait for some components to be up and running if you want to use related services. ## Enable Pluggable Components (Optional) diff --git a/content/en/docs/quick-start/wordpress-deployment.md b/content/en/docs/quick-start/wordpress-deployment.md index ed8fa94e0..16c6e32fa 100644 --- a/content/en/docs/quick-start/wordpress-deployment.md +++ b/content/en/docs/quick-start/wordpress-deployment.md @@ -18,7 +18,7 @@ This tutorial demonstrates how to create an application (WordPress as an example ## Prerequisites -An account `project-regular` is needed with the role of `operator` assigned in one of your projects (the user has been invited to the project). 
For more information, see [Create Workspaces, Projects, Accounts and Roles](../create-workspace-and-project/). +An account `project-regular` is needed with the role of `operator` assigned in one of your projects (the user has been invited to the project). For more information, see [Create Workspaces, Projects, Users and Roles](../create-workspace-and-project/). ## Estimated Time @@ -32,31 +32,21 @@ About 15 minutes. The environment variable `WORDPRESS_DB_PASSWORD` is the password to connect to the database in WordPress. In this step, you need to create a Secret to store the environment variable that will be used in the MySQL Pod template. -1. Log in to the KubeSphere console using the account `project-regular`. Go to the detail page of `demo-project` and navigate to **Configurations**. In **Secrets**, click **Create** on the right. +1. Log in to the KubeSphere console using the account `project-regular`. Go to the detail page of `demo-project` and navigate to **Configuration**. In **Secrets**, click **Create** on the right. - ![create-secrets1](/images/docs/quickstart/wordpress-deployment/create-secrets1.png) - -2. Enter the basic information (for example, name it `mysql-secret`) and click **Next**. On the next page, select **Opaque (Default)** for **Type** and click **Add Data** to add a key-value pair. Enter the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) as below and click **√** in the bottom-right corner to confirm. When you finish, click **Create** to continue. - - ![key-value1](/images/docs/quickstart/wordpress-deployment/key-value1.png) +2. Enter the basic information (for example, name it `mysql-secret`) and click **Next**. On the next page, select **Default** for **Type** and click **Add Data** to add a key-value pair. Enter the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) and click **√** in the lower-right corner to confirm. When you finish, click **Create** to continue. 
#### Create a WordPress Secret -Follow the same steps above to create a WordPress Secret `wordpress-secret` with the key `WORDPRESS_DB_PASSWORD` and value `123456`. Secrets created display in the list as below: - -![wordpress-secrets1](/images/docs/quickstart/wordpress-deployment/wordpress-secrets1.png) +Follow the same steps above to create a WordPress Secret `wordpress-secret` with the key `WORDPRESS_DB_PASSWORD` and value `123456`. Secrets created display in the list. ### Step 2: Create a volume 1. Go to **Volumes** under **Storage** and click **Create**. - ![volumes1](/images/docs/quickstart/wordpress-deployment/volumes1.png) - 2. Enter the basic information of the volume (for example, name it `wordpress-pvc`) and click **Next**. -3. In **Volume Settings**, you need to choose an available **Storage Class**, and set **Access Mode** and **Volume Capacity**. You can use the default value directly as shown below. Click **Next** to continue. - - ![volume-settings1](/images/docs/quickstart/wordpress-deployment/volume-settings1.png) +3. In **Volume Settings**, you need to choose an available **Storage Class**, and set **Access Mode** and **Volume Capacity**. You can use the default value directly. Click **Next** to continue. 4. For **Advanced Settings**, you do not need to add extra information for this step and click **Create** to finish. @@ -64,31 +54,19 @@ Follow the same steps above to create a WordPress Secret `wordpress-secret` with #### Add MySQL backend components -1. Navigate to **Apps** under **Application Workloads**, select **Composing Apps** and click **Create Composing App**. +1. Navigate to **Apps** under **Application Workloads**, select **Composed Apps** and click **Create**. - ![composing-app1](/images/docs/quickstart/wordpress-deployment/composing-app1.png) +2. Enter the basic information (for example, `wordpress` for **Name**) and click **Next**. -2. Enter the basic information (for example, `wordpress` for **App Name**) and click **Next**. +3. 
In **Service Settings**, click **Create Service** to create a service in the app. - ![basic-info1](/images/docs/quickstart/wordpress-deployment/basic-info1.png) - -3. In **Components**, click **Add Service** to set a component in the app. - - ![add-service1](/images/docs/quickstart/wordpress-deployment/add-service1.png) - -4. Define a service type for the component. Select **Stateful Service** here. +4. Select **Stateful Service** to define the service type. 5. Enter the name for the stateful service (for example, **mysql**) and click **Next**. - ![mysqlname1](/images/docs/quickstart/wordpress-deployment/mysqlname1.png) +6. In **Containers**, click **Add Container**. -6. In **Container Image**, click **Add Container Image**. - - ![container-image1](/images/docs/quickstart/wordpress-deployment/container-image1.png) - -7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click **√** in the bottom-right corner as the setting is not finished yet. - - ![add-container1](/images/docs/quickstart/wordpress-deployment/add-container1.png) +7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click **√** in the lower-right corner as the setting is not finished yet. {{< notice note >}} @@ -96,35 +74,21 @@ In **Advanced Settings**, make sure the memory limit is no less than 1000 Mi or {{}} -8. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. Enter the name `MYSQL_ROOT_PASSWORD` and choose the resource `mysql-secret` and the key `MYSQL_ROOT_PASSWORD` created in the previous step. Click **√** after you finish and **Next** to continue. +1. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. Enter the name `MYSQL_ROOT_PASSWORD` and choose the resource `mysql-secret` and the key `MYSQL_ROOT_PASSWORD` created in the previous step. Click **√** after you finish and **Next** to continue. 
- ![environment-var1](/images/docs/quickstart/wordpress-deployment/environment-var1.png) - -9. Select **Add Volume Template** in **Mount Volumes**. Enter the value of **Volume Name** (`mysql`) and **Mount Path** (mode: `ReadAndWrite`, path: `/var/lib/mysql`) as below: - - ![volume-template1](/images/docs/quickstart/wordpress-deployment/volume-template1.png) +2. Click **Add Volume Template** under **Volume Templates**. Enter the value of **Volume Name** (`mysql`) and **Mount Path** (mode: `ReadAndWrite`, path: `/var/lib/mysql`). Click **√** after you finish and click **Next** to continue. -10. In **Advanced Settings**, you can click **Add** directly or select other options based on your needs. - - ![advanced-settings1](/images/docs/quickstart/wordpress-deployment/advanced-settings1.png) - -11. The MySQL component has beed added as shown below: - - ![mysql-finished1](/images/docs/quickstart/wordpress-deployment/mysql-finished1.png) +3. In **Advanced Settings**, you can click **Create** directly or set other options based on your needs. #### Add the WordPress frontend component -12. Click **Add Service** again and select **Stateless Service** this time. Enter the name `wordpress` and click Next. +12. In **Services** under **Application Workloads**, click **Create** again and select **Stateless Service** this time. Enter the name `wordpress` and click **Next**. - ![name-wordpress-1](/images/docs/quickstart/wordpress-deployment/name-wordpress-1.png) +13. Similar to previous steps, click **Add Container**, enter `wordpress:4.8-apache` in the search box, press **Enter** and click **Use Default Ports**. -13. Similar to the step above, click **Add Container Image**, enter `wordpress:4.8-apache` in the search box, press **Enter** and click **Use Default Ports**. - - ![container-image-page1](/images/docs/quickstart/wordpress-deployment/container-image-page1.png) - -14. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. 
Two environment variables need to be added here. Enter the values according to the screenshot below. +14. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. Two environment variables need to be added here. Enter the values as follows. - For `WORDPRESS_DB_PASSWORD`, choose `wordpress-secret` and `WORDPRESS_DB_PASSWORD` created in Task 1. @@ -132,65 +96,37 @@ In **Advanced Settings**, make sure the memory limit is no less than 1000 Mi or {{< notice warning >}} -For the second environment variable added here, the value must be exactly the same as the name you set for MySQL in step 5. Otherwise, Wordpress cannot connect to the corresponding database of MySQL. +For the second environment variable added here, the value must be the same as the name you set for MySQL in step 5. Otherwise, WordPress cannot connect to the corresponding database of MySQL. {{}} - - ![environment-varss1](/images/docs/quickstart/wordpress-deployment/environment-varss1.png) Click **√** to save it and **Next** to continue. -15. In **Mount Volumes**, click **Add Volume** and select **Choose an existing volume**. +1. Under **Volumes**, click **Mount Volume**, and then click **Select Volume**. - ![add-volume-page1](/images/docs/quickstart/wordpress-deployment/add-volume-page1.png) +2. Select `wordpress-pvc` created in the previous step, set the mode as `ReadAndWrite`, and enter `/var/www/html` as its mount path. Click **√** to save it, and then click **Next** to continue. - ![choose-existing-volume1](/images/docs/quickstart/wordpress-deployment/choose-existing-volume1.png) +3. In **Advanced Settings**, you can click **Create** directly or set other options based on your needs. -16. Select `wordpress-pvc` created in the previous step, set the mode as `ReadAndWrite`, and enter `/var/www/html` as its mount path. Click **√** to save it and **Next** to continue. +4. The frontend component is also set now. Click **Next** to continue. 
- ![mount-volume-page1](/images/docs/quickstart/wordpress-deployment/mount-volume-page1.png) +5. You can set route rules (Ingress) here or click **Create** directly. -17. In **Advanced Settings**, you can click **Add** directly or select other options based on your needs. - - ![advanced1](/images/docs/quickstart/wordpress-deployment/advanced1.png) - -18. The frontend component is also set now. Click **Next** to continue. - - ![components-finished1](/images/docs/quickstart/wordpress-deployment/components-finished1.png) - -19. You can set route rules (Ingress) here or click **Create** directly. - - ![ingress-create1](/images/docs/quickstart/wordpress-deployment/ingress-create1.png) - -20. The app will display in the list below after you create it. - - ![application-created1](/images/docs/quickstart/wordpress-deployment/application-created1.png) +6. The app will display in the list after you create it. ### Step 4: Verify resources -In **Workloads**, check the status of `wordpress-v1` and `mysql-v1` in **Deployments** and **StatefulSets** respectively. If they are running as shown in the image below, it means WordPress has been created successfully. - -![wordpress-deployment1](/images/docs/quickstart/wordpress-deployment/wordpress-deployment1.png) - -![mysql-running1](/images/docs/quickstart/wordpress-deployment/mysql-running1.png) +In **Workloads**, check the status of `wordpress-v1` and `mysql-v1` in **Deployments** and **StatefulSets** respectively. If they are running properly, it means WordPress has been created successfully. ### Step 5: Access WordPress through a NodePort -1. To access the Service outside the cluster, navigate to **Services** first. Click the three dots on the right of `wordpress` and select **Edit Internet Access**. - - ![edit-internet-access1](/images/docs/quickstart/wordpress-deployment/edit-internet-access1.png) +1. To access the Service outside the cluster, navigate to **Services** first. 
Click the three dots on the right of `wordpress` and select **Edit External Access**. 2. Select `NodePort` for **Access Method** and click **OK**. - ![access-method](/images/docs/quickstart/wordpress-deployment/access-method.png) - 3. Click the Service and you can see the port is exposed. - ![nodeport-number1](/images/docs/quickstart/wordpress-deployment/nodeport-number1.png) - -4. Access this application at `{Node IP}:{NodePort}` and you can see an image as below: - - ![wordpress-page](/images/docs/quickstart/wordpress-deployment/wordpress-page.png) +4. Access this application at `{Node IP}:{NodePort}`. {{< notice note >}} diff --git a/content/en/docs/reference/api-docs.md b/content/en/docs/reference/api-docs.md index d91f7ce89..73a0ac115 100644 --- a/content/en/docs/reference/api-docs.md +++ b/content/en/docs/reference/api-docs.md @@ -45,12 +45,14 @@ curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \ 'http://[node ip]:31407/oauth/token' \ --data-urlencode 'grant_type=password' \ --data-urlencode 'username=admin' \ - --data-urlencode 'password=P#$$w0rd' + --data-urlencode 'password=P#$$w0rd' \ + --data-urlencode 'client_id=kubesphere' \ + --data-urlencode 'client_secret=kubesphere' ``` {{< notice note >}} -Replace `[node ip]` with your actual IP address. +Replace `[node ip]` with your actual IP address. You can configure client credentials in `ClusterConfiguration`. There is a default client credential whose `client_id` and `client_secret` are both `kubesphere`. {{}} diff --git a/content/en/docs/reference/glossary.md b/content/en/docs/reference/glossary.md index efa74bf61..2337d3b2f 100644 --- a/content/en/docs/reference/glossary.md +++ b/content/en/docs/reference/glossary.md @@ -108,11 +108,11 @@ This glossary includes general terms and technical terms that are specific to Ku - **Allocated Memory**
The metric is calculated based on the total memory requests of Pods, for example, on a node. It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources. -- **Disk Log Collection**
- The capability to collect disk logs in a container and export to stdout, which will then be collected by the system log collector. +- **Log Collection**
+ The Log Collection function allows the system to collect container logs saved on volumes and send the logs to standard output. - **Notification Receiver**
- The channel to receive notifications, such as email, WeChat Work, Slack and webhook. + The channel to receive notifications, such as email, DingTalk, WeCom, Slack, and webhook. ## Network diff --git a/content/en/docs/release/release-v200.md b/content/en/docs/release/release-v200.md index 9572b4245..93d303f89 100644 --- a/content/en/docs/release/release-v200.md +++ b/content/en/docs/release/release-v200.md @@ -4,7 +4,7 @@ keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" description: "KubeSphere release notes for 2.0.0." linkTitle: "Release Notes - 2.0.0" -weight: 18800 +weight: 18900 --- KubeSphere 2.0.0 was released on **May 18th, 2019**. diff --git a/content/en/docs/release/release-v201.md b/content/en/docs/release/release-v201.md index c1bc9633f..d4f043ec7 100644 --- a/content/en/docs/release/release-v201.md +++ b/content/en/docs/release/release-v201.md @@ -4,7 +4,7 @@ keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" description: "KubeSphere release notes for 2.0.1." linkTitle: "Release Notes - 2.0.1" -weight: 18700 +weight: 18800 --- KubeSphere 2.0.1 was released on **June 9th, 2019**. diff --git a/content/en/docs/release/release-v202.md b/content/en/docs/release/release-v202.md index 5d7afee31..d655a0678 100644 --- a/content/en/docs/release/release-v202.md +++ b/content/en/docs/release/release-v202.md @@ -4,7 +4,7 @@ keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" description: "KubeSphere release notes for 2.0.2." linkTitle: "Release Notes - 2.0.2" -weight: 18600 +weight: 18700 --- KubeSphere 2.0.2 was released on July 9, 2019, which fixes known bugs and enhances existing feature. If you have installed versions of 1.0.x, 2.0.0 or 2.0.1, please download KubeSphere installer v2.0.2 to upgrade. 
diff --git a/content/en/docs/release/release-v210.md b/content/en/docs/release/release-v210.md index 4b0c328b0..87ddd7758 100644 --- a/content/en/docs/release/release-v210.md +++ b/content/en/docs/release/release-v210.md @@ -4,7 +4,7 @@ keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" description: "KubeSphere release notes for 2.1.0." linkTitle: "Release Notes - 2.1.0" -weight: 18500 +weight: 18600 --- KubeSphere 2.1.0 was released on Nov 11th, 2019, which fixes known bugs, adds some new features and brings some enhancement. If you have installed versions of 2.0.x, please upgrade it and enjoy the better user experience of v2.1.0. diff --git a/content/en/docs/release/release-v211.md b/content/en/docs/release/release-v211.md index 66d2ee41e..2417e04bb 100644 --- a/content/en/docs/release/release-v211.md +++ b/content/en/docs/release/release-v211.md @@ -4,7 +4,7 @@ keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus" description: "KubeSphere release notes for 2.1.1." linkTitle: "Release Notes - 2.1.1" -weight: 18400 +weight: 18500 --- KubeSphere 2.1.1 was released on Feb 23rd, 2020, which has fixed known bugs and brought some enhancements. For the users who have installed versions of 2.0.x or 2.1.0, make sure to read the user manual carefully about how to upgrade before doing that, and feel free to raise any questions on [GitHub](https://github.com/kubesphere/kubesphere/issues). diff --git a/content/en/docs/release/release-v300.md b/content/en/docs/release/release-v300.md index a9be7efa8..f8ee049c2 100644 --- a/content/en/docs/release/release-v300.md +++ b/content/en/docs/release/release-v300.md @@ -4,7 +4,7 @@ keywords: "Kubernetes, KubeSphere, release-notes" description: "KubeSphere release notes for 3.0.0." 
linkTitle: "Release Notes - 3.0.0" -weight: 18300 +weight: 18400 --- ## How to get v3.0.0 diff --git a/content/en/docs/release/release-v310.md b/content/en/docs/release/release-v310.md index e1267aa53..0405685b4 100644 --- a/content/en/docs/release/release-v310.md +++ b/content/en/docs/release/release-v310.md @@ -3,7 +3,7 @@ title: "Release Notes for 3.1.0" keywords: "Kubernetes, KubeSphere, release notes" description: "KubeSphere Release Notes for 3.1.0" linkTitle: "Release Notes - 3.1.0" -weight: 18200 +weight: 18300 --- ## How to Install v3.1.0 diff --git a/content/en/docs/release/release-v311.md b/content/en/docs/release/release-v311.md index cbf720564..7a7908ccf 100644 --- a/content/en/docs/release/release-v311.md +++ b/content/en/docs/release/release-v311.md @@ -3,7 +3,7 @@ title: "Release Notes for 3.1.1" keywords: "Kubernetes, KubeSphere, release notes" description: "KubeSphere Release Notes for 3.1.1" linkTitle: "Release Notes - 3.1.1" -weight: 18100 +weight: 18200 --- ## User Experience diff --git a/content/en/docs/release/release-v320.md b/content/en/docs/release/release-v320.md new file mode 100644 index 000000000..34a646a56 --- /dev/null +++ b/content/en/docs/release/release-v320.md @@ -0,0 +1,177 @@ +--- +title: "Release Notes for 3.2.0" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.2.0" +linkTitle: "Release Notes - 3.2.0" +weight: 18100 +--- + +## Multi-tenancy & Multi-cluster + +### New Features + +- Add support for setting the host cluster name in multi-cluster scenarios, which defaults to `host`. ([#4211](https://github.com/kubesphere/kubesphere/pull/4211), [@yuswift](https://github.com/yuswift)) +- Add support for setting the cluster name in single-cluster scenarios. ([#4220](https://github.com/kubesphere/kubesphere/pull/4220), [@yuswift](https://github.com/yuswift)) +- Add support for initializing the default cluster name by using globals.config. 
([#2283](https://github.com/kubesphere/console/pull/2283), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add support for scheduling Pod replicas across multiple clusters when creating a Deployment. ([#2191](https://github.com/kubesphere/console/pull/2191), [@weili520](https://github.com/weili520)) +- Add support for changing cluster weights on the project details page. ([#2192](https://github.com/kubesphere/console/pull/2192), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Fix an issue in the **Create Deployment** dialog box in **Cluster Management**, where a multiple-cluster project can be selected by directly entering the project name. ([#2125](https://github.com/kubesphere/console/pull/2125), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an error that occurs when workspace or cluster basic information is edited. ([#2188](https://github.com/kubesphere/console/pull/2188), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Remove information about deleted clusters on the **Basic Information** page of the host cluster. ([#2211](https://github.com/kubesphere/console/pull/2211), [@fuchunlan](https://github.com/fuchunlan)) +- Add support for sorting Services and editing Service settings in multi-cluster projects. ([#2167](https://github.com/kubesphere/console/pull/2167), [@harrisonliu5](https://github.com/harrisonliu5)) +- Refactor the gateway feature of multi-cluster projects. ([#2275](https://github.com/kubesphere/console/pull/2275), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where multi-cluster projects cannot be deleted after the workspace is deleted. ([#4365](https://github.com/kubesphere/kubesphere/pull/4365), [@wansir](https://github.com/wansir)) + +## Observability + +### New Features + +- Add support for HTTPS communication with Elasticsearch. 
([#4176](https://github.com/kubesphere/kubesphere/pull/4176), [@wanjunlei](https://github.com/wanjunlei)) +- Add support for setting GPU types when scheduling GPU workloads. ([#4225](https://github.com/kubesphere/kubesphere/pull/4225), [@zhu733756](https://github.com/zhu733756)) +- Add support for validating notification settings. ([#4216](https://github.com/kubesphere/kubesphere/pull/4216), [@wenchajun](https://github.com/wenchajun)) +- Add support for importing Grafana dashboards by specifying a dashboard URL or by uploading a Grafana dashboard JSON file. KubeSphere automatically converts Grafana dashboards into KubeSphere cluster dashboards. ([#4194](https://github.com/kubesphere/kubesphere/pull/4194), [@zhu733756](https://github.com/zhu733756)) +- Add support for creating Grafana dashboards in **Custom Monitoring**. ([#2214](https://github.com/kubesphere/console/pull/2214), [@harrisonliu5](https://github.com/harrisonliu5)) +- Optimize the **Notification Configuration** feature. ([#2261](https://github.com/kubesphere/console/pull/2261), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add support for setting a GPU limit in the **Edit Default Container Quotas** dialog box. ([#2253](https://github.com/kubesphere/console/pull/2253), [@weili520](https://github.com/weili520)) +- Add a default GPU monitoring dashboard. ([#2580](https://github.com/kubesphere/console/pull/2580), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add the **Leader** tag to the etcd leader on the etcd monitoring page. ([#2445](https://github.com/kubesphere/console/pull/2445), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) + +### Bug Fixes + +- Fix the incorrect Pod information displayed on the **Alerting Messages** page and alerting policy details page.
([#2215](https://github.com/kubesphere/console/pull/2215), [@harrisonliu5](https://github.com/harrisonliu5)) + +## Authentication & Authorization + +### New Features + +- Add a built-in OAuth 2.0 server that supports OpenID Connect. ([#3525](https://github.com/kubesphere/kubesphere/pull/3525), [@wansir](https://github.com/wansir)) +- Remove information confirmation required when an external identity provider is used. ([#4238](https://github.com/kubesphere/kubesphere/pull/4238), [@wansir](https://github.com/wansir)) + +### Bug Fixes + +- Fix incorrect source IP addresses in the login history. ([#4331](https://github.com/kubesphere/kubesphere/pull/4331), [@wansir](https://github.com/wansir)) + +## Storage + +### New Features + +- Change the parameters that determine whether volume clone, volume snapshot, and volume expansion are allowed. ([#2199](https://github.com/kubesphere/console/pull/2199), [@weili520](https://github.com/weili520)) +- Add support for setting the volume binding mode during storage class creation. ([#2220](https://github.com/kubesphere/console/pull/2220), [@weili520](https://github.com/weili520)) +- Add the volume instance management feature. ([#2226](https://github.com/kubesphere/console/pull/2226), [@weili520](https://github.com/weili520)) +- Add support for multiple snapshot classes. Users are allowed to select a snapshot type when creating a snapshot. ([#2218](https://github.com/kubesphere/console/pull/2218), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Change the volume access mode options on the **Volume Settings** tab page. ([#2348](https://github.com/kubesphere/console/pull/2348), [@live77](https://github.com/live77)) + +## Network + +### New Features + +- Add the Route sorting, routing rule editing, and annotation editing features on the Route list page. 
([#2165](https://github.com/kubesphere/console/pull/2165), [@harrisonliu5](https://github.com/harrisonliu5)) +- Refactor the cluster gateway and project gateway features. ([#2262](https://github.com/kubesphere/console/pull/2262), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add the service name auto-completion feature in routing rule creation. ([#2196](https://github.com/kubesphere/console/pull/2196), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- DNS optimizations for ks-console: + - Use the name of the ks-apiserver Service directly instead of `ks-apiserver.kubesphere-system.svc` as the API URL. + - Add a DNS cache plugin (dnscache) for caching DNS results. ([#2435](https://github.com/kubesphere/console/pull/2435), [@live77](https://github.com/live77)) + +### Bug Fixes + +- Add a **Cancel** button in the **Enable Gateway** dialog box. ([#2245](https://github.com/kubesphere/console/pull/2245), [@weili520](https://github.com/weili520)) + +## Apps & App Store + +### New Features + +- Add support for setting a synchronization interval during app repository creation and editing. ([#2311](https://github.com/kubesphere/console/pull/2311), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add a disclaimer in the App Store. ([#2173](https://github.com/kubesphere/console/pull/2173), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Add support for dynamically loading community-developed Helm charts into the App Store. ([#4250](https://github.com/kubesphere/kubesphere/pull/4250), [@xyz-li](https://github.com/xyz-li)) + +### Bug Fixes + +- Fix an issue where the value of `kubesphere_app_template_count` is always `0` when `GetKubeSphereStats` is called. 
([#4130](https://github.com/kubesphere/kubesphere/pull/4130), [@Hanamichi](https://github.com/x893675)) + +## DevOps + +### New Features + +- Set the system to hide the **Branch** column on the **Run Records** tab page when the current pipeline is not a multi-branch pipeline. ([#2379](https://github.com/kubesphere/console/pull/2379), [@live77](https://github.com/live77)) +- Add the feature of automatically loading Jenkins configurations from ConfigMaps. ([#75](https://github.com/kubesphere/ks-devops/pull/75), [@JohnNiang](https://github.com/JohnNiang)) +- Add support for triggering pipelines by manipulating CRDs instead of calling Jenkins APIs. ([#41](https://github.com/kubesphere/ks-devops/issues/41), [@rick](https://github.com/LinuxSuRen)) +- Add support for containerd-based pipelines. ([#171](https://github.com/kubesphere/ks-devops/pull/171), [@rick](https://github.com/LinuxSuRen)) +- Add Jenkins job metadata into pipeline annotations. ([#254](https://github.com/kubesphere/ks-devops/issues/254), [@JohnNiang](https://github.com/JohnNiang)) + +### Bug Fixes + +- Fix an issue where credential creation and update fails when the value length of a parameter is too long. ([#123](https://github.com/kubesphere/ks-devops/pull/123), [@shihh](https://github.com/shihaoH)) +- Fix an issue where ks-apiserver crashes when the **Run Records** tab page of a parallel pipeline is opened. ([#93](https://github.com/kubesphere/ks-devops/pull/93), [@JohnNiang](https://github.com/JohnNiang)) + +### Dependency Upgrades + +- Upgrade the version of Configuration as Code to 1.53. ([#42](https://github.com/kubesphere/ks-jenkins/pull/42), [@rick](https://github.com/LinuxSuRen)) + +## Installation + +### New Features + +- Add support for Kubernetes v1.21.5 and v1.22.1. ([#634](https://github.com/kubesphere/kubekey/pull/634), [@pixiake](https://github.com/pixiake)) +- Add support for automatically setting the container runtime. 
([#738](https://github.com/kubesphere/kubekey/pull/738), [@pixiake](https://github.com/pixiake)) +- Add support for automatically updating Kubernetes certificates. ([#705](https://github.com/kubesphere/kubekey/pull/705), [@pixiake](https://github.com/pixiake)) +- Add support for installing Docker and containerd using a binary file. ([#657](https://github.com/kubesphere/kubekey/pull/657), [@pixiake](https://github.com/pixiake)) +- Add support for Flannel VxLAN and direct routing. ([#606](https://github.com/kubesphere/kubekey/pull/606), [@kinglong08](https://github.com/kinglong08)) +- Add support for deploying etcd using a binary file. ([#634](https://github.com/kubesphere/kubekey/pull/634), [@pixiake](https://github.com/pixiake)) +- Add an internal load balancer for deploying a high availability system. ([#567](https://github.com/kubesphere/kubekey/pull/567), [@24sama](https://github.com/24sama)) + +### Bug Fixes +- Fix a runtime.RawExtension serialization error. ([#731](https://github.com/kubesphere/kubekey/pull/731), [@pixiake](https://github.com/pixiake)) +- Fix the nil pointer error during cluster upgrade. ([#684](https://github.com/kubesphere/kubekey/pull/684), [@24sama](https://github.com/24sama)) +- Add support for updating certificates of Kubernetes v1.20.0 and later. ([#690](https://github.com/kubesphere/kubekey/pull/690), [@24sama](https://github.com/24sama)) +- Fix a DNS address configuration error. ([#637](https://github.com/kubesphere/kubekey/pull/637), [@pixiake](https://github.com/pixiake)) +- Fix a cluster creation error that occurs when no default gateway address exists. ([#661](https://github.com/kubesphere/kubekey/pull/661), [@liulangwa](https://github.com/liulangwa)) + +## User Experience +- Fix language mistakes and optimize wording. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Fix incorrect function descriptions. 
([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Remove hard-coded and concatenated UI strings to better support UI localization and internationalization. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Add conditional statements to display correct English singular and plural forms. ([@Patrick-LuoYu](https://github.com/Patrick-LuoYu), [@Felixnoo](https://github.com/Felixnoo), [@serenashe](https://github.com/serenashe)) +- Optimize the **Pod Scheduling Rules** area in the **Create Deployment** dialog box. ([#2170](https://github.com/kubesphere/console/pull/2170), [@qinyueshang](https://github.com/qinyueshang)) +- Fix an issue in the **Edit Project Quotas** dialog box, where the quota value changes to 0 when it is set to infinity. ([#2118](https://github.com/kubesphere/console/pull/2118), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an issue in the **Create ConfigMap** dialog box, where the position of the hammer icon is incorrect when the data entry is empty. ([#2206](https://github.com/kubesphere/console/pull/2206), [@fuchunlan](https://github.com/fuchunlan)) +- Fix the incorrect default value of the time range drop-down list on the **Overview** page of projects. ([#2340](https://github.com/kubesphere/console/pull/2340), [@fuchunlan](https://github.com/fuchunlan)) +- Fix an error that occurs during login redirection, where redirection fails if the referer URL contains an ampersand (&). ([#2194](https://github.com/kubesphere/console/pull/2194), [@harrisonliu5](https://github.com/harrisonliu5)) +- Change **1 hours** to **1 hour** on the custom monitoring dashboard creation page. ([#2276](https://github.com/kubesphere/console/pull/2276), [@live77](https://github.com/live77)) +- Fix the incorrect Service types on the Service list page. 
([#2178](https://github.com/kubesphere/console/pull/2178), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- Fix the incorrect traffic data displayed in grayscale release job details. ([#2422](https://github.com/kubesphere/console/pull/2422), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue in the **Edit Project Quotas** dialog box, where values with two decimal places and values greater than 8 cannot be set. ([#2127](https://github.com/kubesphere/console/pull/2127), [@weili520](https://github.com/weili520)) +- Allow the **About** dialog box to be closed by clicking other areas of the window. ([#2114](https://github.com/kubesphere/console/pull/2114), [@fuchunlan](https://github.com/fuchunlan)) +- Optimize the project title so that the cursor is changed into a hand when hovering over the project title. ([#2128](https://github.com/kubesphere/console/pull/2128), [@fuchunlan](https://github.com/fuchunlan)) +- Add support for creating ConfigMaps and Secrets in the **Environment Variables** area of the **Create Deployment** dialog box. ([#2227](https://github.com/kubesphere/console/pull/2227), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add support for setting Pod annotations in the **Create Deployment** dialog box. ([#2129](https://github.com/kubesphere/console/pull/2129), [@harrisonliu5](https://github.com/harrisonliu5)) +- Allow domain names to start with an asterisk (*). ([#2432](https://github.com/kubesphere/console/pull/2432), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- Add support for searching for Harbor images in the **Create Deployment** dialog box. ([#2132](https://github.com/kubesphere/console/pull/2132), [@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- Add support for mounting volumes to init containers. ([#2166](https://github.com/kubesphere/console/pull/2166), [@Sigboom](https://github.com/Sigboom)) +- Remove the workload auto-restart feature in volume expansion. 
([#4121](https://github.com/kubesphere/kubesphere/pull/4121), [@wenhuwang](https://github.com/wenhuwang)) + + +## APIs + +- Deprecate router API version v1alpha2. ([#4193](https://github.com/kubesphere/kubesphere/pull/4193), [@RolandMa1986](https://github.com/RolandMa1986)) +- Upgrade the pipeline API version from v2 to v3. ([#2323](https://github.com/kubesphere/console/pull/2323), [@harrisonliu5](https://github.com/harrisonliu5)) +- Change the Secret verification API. ([#2368](https://github.com/kubesphere/console/pull/2368), [@harrisonliu5](https://github.com/harrisonliu5)) +- Client credential is required for OAuth2 Token endpoint.([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) + +## Component Changes + +- kubefed: v0.7.0 -> v0.8.1 +- prometheus-operator: v0.42.1 -> v0.43.2 +- notification-manager: v1.0.0 -> v1.4.0 +- fluent-bit: v1.6.9 -> v1.8.3 +- kube-events: v0.1.0 -> v0.3.0 +- kube-auditing: v0.1.2 -> v0.2.0 +- istio: 1.6.10 -> 1.11.1 +- jaeger: 1.17 -> 1.27 +- kiali: v1.26.1 -> v1.38 +- KubeEdge: v1.6.2 -> 1.7.2 \ No newline at end of file diff --git a/content/en/docs/release/release-v321.md b/content/en/docs/release/release-v321.md new file mode 100644 index 000000000..2c5caa1d5 --- /dev/null +++ b/content/en/docs/release/release-v321.md @@ -0,0 +1,46 @@ +--- +title: "Release Notes for 3.2.1" +keywords: "Kubernetes, KubeSphere, release notes" +description: "KubeSphere Release Notes for 3.2.1" +linkTitle: "Release Notes - 3.2.1" +weight: 18099 +--- + +## Enhancements and Bug Fixes + +### Enhancements + +- Add support for filtering Pods by status. ([#4434](https://github.com/kubesphere/kubesphere/pull/4434), [@iawia002](https://github.com/iawia002), [#2620](https://github.com/kubesphere/console/pull/2620), [@weili520](https://github.com/weili520)) +- Add a tip in the image builder creation dialog box, which indicates that containerd is not supported. 
([#2734](https://github.com/kubesphere/console/pull/2734), [@weili520](https://github.com/weili520)) +- Add information about available quotas in the **Edit Project Quotas** dialog box. ([#2619](https://github.com/kubesphere/console/pull/2619), [@weili520](https://github.com/weili520)) + +### Bug Fixes + +- Change the password verification rules to prevent passwords without uppercase letters. ([#4481](https://github.com/kubesphere/kubesphere/pull/4481), [@live77](https://github.com/live77)) +- Fix a login issue, where a user from an LDAP identity provider cannot log in if information about the user does not exist on KubeSphere. ([#4436](https://github.com/kubesphere/kubesphere/pull/4436), [@RolandMa1986](https://github.com/RolandMa1986)) +- Fix an issue where cluster gateway metrics cannot be obtained. ([#4457](https://github.com/kubesphere/kubesphere/pull/4457), [@RolandMa1986](https://github.com/RolandMa1986)) +- Fix incorrect access modes displayed in the volume list. ([#2686](https://github.com/kubesphere/console/pull/2686), [@weili520](https://github.com/weili520)) +- Remove the **Update** button on the **Gateway Settings** page. ([#2608](https://github.com/kubesphere/console/pull/2608), [@weili520](https://github.com/weili520)) +- Fix a display error of the time range selection drop-down list. ([#2715](https://github.com/kubesphere/console/pull/2715), [@weili520](https://github.com/weili520)) +- Fix an issue where Secret data text is not displayed correctly when the text is too long. ([#2600](https://github.com/kubesphere/console/pull/2600), [@weili520](https://github.com/weili520)) +- Fix an issue where StatefulSet creation fails when a volume template is mounted. ([#2730](https://github.com/kubesphere/console/pull/2730), [@weili520](https://github.com/weili520)) +- Fix an issue where cluster gateway information fails to be obtained when the user does not have permission to view cluster information. 
([#2695](https://github.com/kubesphere/console/pull/2695), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where status and run records of pipelines are not automatically updated. ([#2594](https://github.com/kubesphere/console/pull/2594), [@harrisonliu5](https://github.com/harrisonliu5)) +- Add a tip for the kubernetesDeply pipeline step, which indicates that the step is about to be deprecated. ([#2660](https://github.com/kubesphere/console/pull/2660), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix an issue where HTTP registry addresses of image registry Secrets cannot be validated. ([#2795](https://github.com/kubesphere/console/pull/2795), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix the incorrect URL of the Harbor image. ([#2784](https://github.com/kubesphere/console/pull/2784), [@harrisonliu5](https://github.com/harrisonliu5)) +- Fix a display error of log search results. ([#2598](https://github.com/kubesphere/console/pull/2598), [@weili520](https://github.com/weili520)) +- Fix an error in the volume instance YAML configuration. ([#2629](https://github.com/kubesphere/console/pull/2629), [@weili520](https://github.com/weili520)) +- Fix incorrect available workspace quotas displayed in the **Edit Project Quotas** dialog box. ([#2613](https://github.com/kubesphere/console/pull/2613), [@weili520](https://github.com/weili520)) +- Fix an issue in the **Monitoring** dialog box, where the time range selection drop-down list does not function properly. ([#2722](https://github.com/kubesphere/console/pull/2722), [@weili520](https://github.com/weili520)) +- Fix incorrect available quotas displayed in the Deployment creation page. ([#2668](https://github.com/kubesphere/console/pull/2668), [@weili520](https://github.com/weili520)) +- Change the documentation address to [kubesphere.io](http://kubesphere.io/) and [kubesphere.com.cn](http://kubesphere.com.cn/). 
([#2628](https://github.com/kubesphere/console/pull/2628), [@weili520](https://github.com/weili520)) +- Fix an issue where Deployment volume settings cannot be modified. ([#2656](https://github.com/kubesphere/console/pull/2656), [@weili520](https://github.com/weili520)) +- Fix an issue where the container terminal cannot be accessed when the browser language is not English, Simplified Chinese, or Traditional Chinese. ([#2702](https://github.com/kubesphere/console/pull/2702), [@weili520](https://github.com/weili520)) +- Fix incorrect volume status displayed in the Deployment editing dialog box. ([#2622](https://github.com/kubesphere/console/pull/2622), [@weili520](https://github.com/weili520)) +- Remove labels displayed on the credential details page. ([#2621](https://github.com/kubesphere/console/pull/2621), [@123liubao](https://github.com/123liubao)) +- Fix an issue caused by non-ASCII branch names. ([#399](https://github.com/kubesphere/ks-devops/pull/399)) +- Fix incorrect handling of the choice parameter in pipelines. ([#378](https://github.com/kubesphere/ks-devops/pull/378)) +- Fix an issue where users cannot proceed with or abort pipelines created by other users. [#408](https://github.com/kubesphere/ks-devops/pull/408) +- Fix the incorrect order of pipeline run records. [#394](https://github.com/kubesphere/ks-devops/pull/394) +- Fix an issue where a pipeline triggered by a non-admin user displays "Started by user admin". [#384](https://github.com/kubesphere/ks-devops/pull/384) diff --git a/content/en/docs/toolbox/_index.md b/content/en/docs/toolbox/_index.md index 83e6b4668..79b2aff82 100644 --- a/content/en/docs/toolbox/_index.md +++ b/content/en/docs/toolbox/_index.md @@ -11,6 +11,3 @@ icon: "/images/docs/docs.svg" --- KubeSphere provides several important functionalities from the toolbox. This chapter demonstrates how to use the toolbox of KubeSphere to query events, logs, and auditing logs, view resource consumption information, and run commands with web kubectl. 
- -![toolbox](/images/docs/toolbox/index/toolbox.png) - diff --git a/content/en/docs/toolbox/auditing/auditing-query.md b/content/en/docs/toolbox/auditing/auditing-query.md index 43a3e1290..da235f967 100644 --- a/content/en/docs/toolbox/auditing/auditing-query.md +++ b/content/en/docs/toolbox/auditing/auditing-query.md @@ -14,23 +14,21 @@ You need to enable [KubeSphere Auditing Logs](../../../pluggable-components/audi ## Enter the Query Interface -1. The query function is available for all users. Log in to the console with any account, hover over the in the lower-right corner and select **Auditing Operating**. +1. The query function is available for all users. Log in to the console with any user, hover over the in the lower-right corner and select **Audit Log Search**. {{< notice note >}} -Any account has the authorization to query auditing logs, while the logs each account is able to see are different. +Any user has the permission to query auditing logs, while the logs that each user is able to see are different. -- If an account has the authorization of viewing resources in a project, it can see the auditing log that happens in this project, such as workload creation in the project. -- If an account has the authorization of listing projects in a workspace, it can see the auditing log that happens in this workspace but not in projects, such as project creation in the workspace. -- If an account has the authorization of listing projects in a cluster, it can see the auditing log that happens in this cluster but not in workspaces and projects, such as workspace creation in the cluster. +- If a user has the permission to view resources in a project, it can see the auditing log that happens in this project, such as workload creation in the project. +- If a user has the permission to list projects in a workspace, it can see the auditing log that happens in this workspace but not in projects, such as project creation in the workspace. 
+- If a user has the permission to list projects in a cluster, it can see the auditing log that happens in this cluster but not in workspaces and projects, such as workspace creation in the cluster. {{}} 2. In the pop-up window, you can view log trends in the last 12 hours. - ![auditing-logs](/images/docs/toolbox/auditing-query/auditing-logs.png) - -3. The **Auditing Operating** console supports the following query parameters: +3. The **Audit Log Search** console supports the following query parameters:
@@ -84,10 +82,6 @@ Any account has the authorization to query auditing logs, while the logs each ac ## Enter Query Parameters -1. Select a filter and enter the keyword you want to search. For example, query auditing logs containing the information of `services` created as shown in the following screenshot: +1. Select a filter and enter the keyword you want to search. For example, query auditing logs containing the information of `services` created. - ![services-created](/images/docs/toolbox/auditing-query/services-created.png) - -2. You can click the results to see the auditing log details. - - ![auditing-log-details](/images/docs/toolbox/auditing-query/auditing-log-details.png) \ No newline at end of file +2. You can click the results to see the auditing log details. \ No newline at end of file diff --git a/content/en/docs/toolbox/auditing/auditing-receive-customize.md b/content/en/docs/toolbox/auditing/auditing-receive-customize.md index 55ff5b018..546960459 100644 --- a/content/en/docs/toolbox/auditing/auditing-receive-customize.md +++ b/content/en/docs/toolbox/auditing/auditing-receive-customize.md @@ -126,7 +126,7 @@ spec: ``` {{< notice tip >}} -You can also use an account of `platform-admin` role to log in to the console, search `Webhook` in **CRDs** on the **Cluster Management** page, and edit `kube-auditing-webhook` directly. +You can also use a user of `platform-admin` role to log in to the console, search `Webhook` in **CRDs** on the **Cluster Management** page, and edit `kube-auditing-webhook` directly. {{}} diff --git a/content/en/docs/toolbox/auditing/auditing-rule.md b/content/en/docs/toolbox/auditing/auditing-rule.md index d548d0214..17ba026e9 100644 --- a/content/en/docs/toolbox/auditing/auditing-rule.md +++ b/content/en/docs/toolbox/auditing/auditing-rule.md @@ -8,11 +8,7 @@ weight: 15320 An auditing rule defines the policy for processing auditing logs. 
KubeSphere Auditing Logs provide users with two CRD rules (`archiving-rule` and `alerting-rule`) for customization. -After you enable [KubeSphere Auditing Logs](../../../pluggable-components/auditing-logs/), log in to the console with an account of `platform-admin` role. In **CRDs** on the **Cluster Management** page, enter `rules.auditing.kubesphere.io` in the search bar. Click the result **Rule** as below and you can see the two CRD rules. - -![auditing-crd](/images/docs/toolbox/auditing-crd.jpg) - -![alerting-archiving-rule](/images/docs/toolbox/alerting-archiving-rule.jpg) +After you enable [KubeSphere Auditing Logs](../../../pluggable-components/auditing-logs/), log in to the console with a user of `platform-admin` role. In **CRDs** on the **Cluster Management** page, enter `rules.auditing.kubesphere.io` in the search bar. Click the result **Rule** and you can see the two CRD rules. Below are examples of part of the rules. diff --git a/content/en/docs/toolbox/events-query.md b/content/en/docs/toolbox/events-query.md index 3506adf63..8e7cc9091 100644 --- a/content/en/docs/toolbox/events-query.md +++ b/content/en/docs/toolbox/events-query.md @@ -16,29 +16,23 @@ This guide demonstrates how you can do multi-level, fine-grained event queries t ## Query Events -1. The event query function is available for all users. Log in to the console with any account, hover over in the lower-right corner and select **Event Search**. +1. The event query function is available for all users. Log in to the console with any account, hover over in the lower-right corner and select **Resource Event Search**. -2. In the displayed dialog box, you can view the number of events that the account has permission to view. - - ![event-search](/images/docs/toolbox/event-query/event-search.png) +2. In the displayed dialog box, you can view the number of events that the user has permission to view. 
{{< notice note >}} - KubeSphere supports event queries on each cluster separately if you have enabled the [multi-cluster feature](../../multicluster-management/). You can click on the left of the search box and select a target cluster. -- KubeSphere stores events for last seven days by default. +- KubeSphere stores events for the last seven days by default. {{}} 3. You can click the search box and enter a condition to search for events by message, workspace, project, resource type, resource name, reason, category, or time range (for example, use `Time Range:Last 10 minutes` to search for events within the last 10 minutes). - ![event-search-list](/images/docs/toolbox/event-query/event-search-list.png) +4. Click any one of the results from the list, and you can see raw information of it. It is convenient for developers in terms of debugging and analysis. -4. Click any one of the results from the list, and you can see raw information of it. It is convenient for developers in terms of debugging and analyzing. - - ![event-details](/images/docs/toolbox/event-query/event-details.png) - - {{< notice note >}} +{{< notice note >}} The event query interface supports dynamic refreshing every 5s, 10s or 15s. diff --git a/content/en/docs/toolbox/log-query.md b/content/en/docs/toolbox/log-query.md index 6ff4228b7..48ee7489d 100644 --- a/content/en/docs/toolbox/log-query.md +++ b/content/en/docs/toolbox/log-query.md @@ -18,9 +18,7 @@ You need to enable the [KubeSphere Logging System](../../pluggable-components/lo 1. The log query function is available for all users. Log in to the console with any account, hover over in the lower-right corner and select **Log Search**. -2. In the displayed dialog box, you can see a time histogram of log numbers, a cluster selection drop-down list and a log search box. - - ![log-search](/images/docs/toolbox/log-query/log-search.png) +2. 
In the pop-up window, you can see a time histogram of log numbers, a cluster selection drop-down list, and a log search bar. {{< notice note >}} @@ -30,11 +28,9 @@ You need to enable the [KubeSphere Logging System](../../pluggable-components/lo {{}} -3. You can click the search box and enter a condition to search for logs by keyword, project, workload, Pod, container, or time range (for example, use `Time Range:Last 10 minutes` to search for logs within the last 10 minutes). Alternatively, click on the bars in the time histogram, and KubeSphere will use the time range of that bar for log queries. +3. You can customize the query time range by selecting **Time Range** in the log search bar. Alternatively, click on the bars in the time histogram, and KubeSphere will use the time range of that bar for log queries. - ![log-search-list](/images/docs/toolbox/log-query/log-search-list.png) - - {{< notice note >}} +{{< notice note >}} - The keyword field supports the query of keyword combinations. For example, you can use `Error`, `Fail`, `Fatal`, `Exception`, and `Warning` together to query all the exception logs. - The keyword field supports exact query and fuzzy query. The fuzzy query provides case-insensitive fuzzy matching and retrieval of full terms by the first half of a word or phrase based on the ElasticSearch segmentation rules. For example, you can retrieve the logs containing `node_cpu_total` by searching the keyword `node_cpu` instead of the keyword `cpu`. @@ -44,32 +40,20 @@ You need to enable the [KubeSphere Logging System](../../pluggable-components/lo ## Use Search Parameters -1. You can enter multiple conditions to narrow down your search results. +1. You can provide as many fields as possible to narrow down your search results. - ![log-search-conditions](/images/docs/toolbox/log-query/log-search-conditions.png) - -3. Click any one of the results from the list. 
Drill into its details page and inspect the log from this Pod, including the complete context on the right. It is convenient for developers in terms of debugging and analyzing. - - ![log-search-details-page](/images/docs/toolbox/log-query/log-search-details-page.png) +2. Click any one of the results from the list. Drill into its detail page and inspect the log from this Pod, including the complete context on the right. It is convenient for developers in terms of debugging and analyzing. {{< notice note >}} -- The log query interface supports dynamic refreshing with 5s, 10s, or 15s. -- You can click in the upper-right corner to export logs to a local file for further analysis. +The log query interface supports dynamic refreshing with 5s, 10s or 15s, and allows users to export logs to a local file for further analysis (in the upper-right corner). -{{}} - -4. In the left panel, you can click to switch between Pods and inspect its containers within the same project. In this case, you can detect if any abnormal Pods affect other Pods. + {{}} +4. In the left panel, you can click to view the Pod details page or container details page. ## Drill into the Details Page -In the left panel, you can click to view the Pod details page or container details page. +1. If the log looks abnormal, you can drill into the Pod detail page or container detail page to further inspect container logs, resource monitoring graphs, and events. -The following figure shows the Pod details page: - -![pod-details-page](/images/docs/toolbox/log-query/pod-details-page.png) - -The following figure shows the container details page. You can click **Terminal** in the upper-left corner to open the terminal and debug the container. - -![container-detail-page](/images/docs/toolbox/log-query/container-detail-page.png) \ No newline at end of file +2. Inspect the container detail page. At the same time, it allows you to open the terminal to debug the container directly. 
diff --git a/content/en/docs/toolbox/metering-and-billing/enable-billing.md b/content/en/docs/toolbox/metering-and-billing/enable-billing.md index 169d7cf51..468987a00 100644 --- a/content/en/docs/toolbox/metering-and-billing/enable-billing.md +++ b/content/en/docs/toolbox/metering-and-billing/enable-billing.md @@ -81,6 +81,4 @@ Perform the following steps to enable KubeSphere Billing. kubectl rollout restart deploy ks-apiserver -n kubesphere-system ``` -4. On the **Metering and Billing** page, you can see the cost information of resources. - - ![metering-and-billing](/images/docs/toolbox/metering-and-billing/enable-billing/metering-and-billing.png) \ No newline at end of file +4. On the **Metering and Billing** page, you can see the cost information of resources. \ No newline at end of file diff --git a/content/en/docs/toolbox/metering-and-billing/view-resource-consumption.md b/content/en/docs/toolbox/metering-and-billing/view-resource-consumption.md index ddd14d593..de01a3e85 100644 --- a/content/en/docs/toolbox/metering-and-billing/view-resource-consumption.md +++ b/content/en/docs/toolbox/metering-and-billing/view-resource-consumption.md @@ -21,9 +21,7 @@ KubeSphere metering helps you track resource consumption within a given cluster 2. Click **View Consumption** in the **Cluster Resource Consumption** section. -3. On the left side of the dashboard, you can see a cluster list containing your Host Cluster and all Member Clusters if you have enabled [multi-cluster management](../../../multicluster-management/). There is only one cluster called `default` in the list if it is not enabled. - - ![cluster-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png) +3. On the left side of the dashboard, you can see a cluster list containing your host cluster and all member clusters if you have enabled [multi-cluster management](../../../multicluster-management/). 
There is only one cluster called `default` in the list if it is not enabled. On the right side, there are three parts showing resource consumption in different ways. @@ -47,10 +45,6 @@ KubeSphere metering helps you track resource consumption within a given cluster
4. You can click a cluster on the left and dive deeper into a node or Pod to see detailed consumption information. - - ![node-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/node-page.png) - - ![pod-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/pod-page.png) {{< notice note >}} @@ -68,16 +62,10 @@ KubeSphere metering helps you track resource consumption within a given cluster 3. On the left side of the dashboard, you can see a list containing all the workspaces in the current cluster. The right part displays detailed consumption information in the selected workspace, the layout of which is basically the same as that of a cluster. - ![workspace-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png) - {{< notice note >}} In a multi-cluster architecture, you cannot see the metering and billing information of a workspace if it does not have any available cluster assigned to it. For more information, see [Cluster Visibility and Authorization](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/). {{}} -4. Click a workspace on the left and dive deeper into a project or workload (for example, Deployment and StatefulSet) to see detailed consumption information. - - ![project-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/project-page.png) - - ![workload-page](/images/docs/toolbox/metering-and-billing/view-resource-consumption/workload-page.png) \ No newline at end of file +4. Click a workspace on the left and dive deeper into a project or workload (for example, Deployment and StatefulSet) to see detailed consumption information. 
\ No newline at end of file diff --git a/content/en/docs/toolbox/web-kubectl.md b/content/en/docs/toolbox/web-kubectl.md index 349790651..cc5be8cf0 100644 --- a/content/en/docs/toolbox/web-kubectl.md +++ b/content/en/docs/toolbox/web-kubectl.md @@ -14,13 +14,9 @@ This tutorial demonstrates how to use web kubectl to operate on and manage clust ## Use Web Kubectl -1. Log in to KubeSphere with an account granted the `platform-admin` role, hover over the **Toolbox** in the lower-right corner and select **Kubectl**. +1. Log in to KubeSphere with a user granted the `platform-admin` role, hover over the **Toolbox** in the lower-right corner and select **Kubectl**. - ![web-kubectl-enter](/images/docs/web-kubectl/web-kubectl-enter.png) - -2. You can see the kubectl interface as shown in the pop-up window. If you have enabled the multi-cluster feature, you need to select the target cluster first from the drop-down list in the upper-right corner. This drop-down list is not visible if the multi-cluster feature is not enabled. - - ![web-kubectl-cluster-select](/images/docs/web-kubectl/web-kubectl-cluster-select.png) +2. You can see the kubectl interface in the pop-up window. If you have enabled the multi-cluster feature, you need to select the target cluster first from the drop-down list in the upper-right corner. This drop-down list is not visible if the multi-cluster feature is not enabled. 3. Enter kubectl commands in the command-line tool to query and manage Kubernetes cluster resources. For example, execute the following command to query the status of all PVCs in the cluster. diff --git a/content/en/docs/upgrade/_index.md b/content/en/docs/upgrade/_index.md index 3008c7916..80a63e812 100644 --- a/content/en/docs/upgrade/_index.md +++ b/content/en/docs/upgrade/_index.md @@ -11,4 +11,4 @@ icon: "/images/docs/docs.svg" --- -This chapter demonstrates how cluster operators can upgrade KubeSphere to v3.1.1. 
\ No newline at end of file +This chapter demonstrates how cluster operators can upgrade KubeSphere to 3.2.1. \ No newline at end of file diff --git a/content/en/docs/upgrade/air-gapped-upgrade-with-ks-installer.md b/content/en/docs/upgrade/air-gapped-upgrade-with-ks-installer.md index 33b110585..a4ad40b5f 100644 --- a/content/en/docs/upgrade/air-gapped-upgrade-with-ks-installer.md +++ b/content/en/docs/upgrade/air-gapped-upgrade-with-ks-installer.md @@ -1,6 +1,6 @@ --- title: "Air-Gapped Upgrade with ks-installer" -keywords: "Air-Gapped, upgrade, kubesphere, v3.1.1" +keywords: "Air-Gapped, upgrade, kubesphere, 3.2.1" description: "Use ks-installer and offline package to upgrade KubeSphere." linkTitle: "Air-Gapped Upgrade with ks-installer" weight: 7500 @@ -11,11 +11,11 @@ ks-installer is recommended for users whose Kubernetes clusters were not set up ## Prerequisites -- You need to have a KubeSphere cluster running v3.0.0. If your KubeSphere version is v2.1.1 or earlier, upgrade to v3.0.0 first. -- Read [Release Notes for 3.1.1](../../release/release-v311/) carefully. +- You need to have a KubeSphere cluster running v3.1.x. If your KubeSphere version is v3.0.0 or earlier, upgrade to v3.1.x first. +- Read [Release Notes for 3.2.1](../../release/release-v321/) carefully. - Back up any important component beforehand. - A Docker registry. You need to have a Harbor or other Docker registries. For more information, see [Prepare a Private Image Registry](../../installing-on-linux/introduction/air-gapped-installation/#step-2-prepare-a-private-image-registry). -- Supported Kubernetes versions of KubeSphere v3.1.1: v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- Supported Kubernetes versions of KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x, and v1.22.x (experimental). ## Step 1: Prepare Installation Images @@ -24,7 +24,7 @@ As you install KubeSphere in an air-gapped environment, you need to prepare an i 1. 
Download the image list file `images-list.txt` from a machine that has access to Internet through the following command: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} @@ -36,7 +36,7 @@ As you install KubeSphere in an air-gapped environment, you need to prepare an i 2. Download `offline-installation-tool.sh`. ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. Make the `.sh` file executable. @@ -96,10 +96,10 @@ Similar to installing KubeSphere on an existing Kubernetes cluster in an online 1. Execute the following command to download ks-installer and transfer it to your machine that serves as the taskbox for installation. ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` -2. Verify that you have specified your private image registry in `spec.local_registry` in `cluster-configuration.yaml`. Note that if your existing cluster was installed in an air-gapped environment, you may already have this field specified. Otherwise, run the following command to edit `cluster-configuration.yaml` of your existing KubeSphere v3.0.0 cluster and add the private image registry: +2. Verify that you have specified your private image registry in `spec.local_registry` in `cluster-configuration.yaml`. Note that if your existing cluster was installed in an air-gapped environment, you may already have this field specified. 
Otherwise, run the following command to edit `cluster-configuration.yaml` of your existing KubeSphere v3.1.x cluster and add the private image registry: ``` kubectl edit cc -n kubesphere-system @@ -169,5 +169,3 @@ Now, you will be able to access the web console of KubeSphere through `http://{I To access the console, make sure port 30880 is opened in your security group. {{}} - -![kubesphere-login](https://ap3.qingstor.com/kubesphere-website/docs/login.png) \ No newline at end of file diff --git a/content/en/docs/upgrade/air-gapped-upgrade-with-kubekey.md b/content/en/docs/upgrade/air-gapped-upgrade-with-kubekey.md index de215a9d9..f07645da5 100644 --- a/content/en/docs/upgrade/air-gapped-upgrade-with-kubekey.md +++ b/content/en/docs/upgrade/air-gapped-upgrade-with-kubekey.md @@ -1,6 +1,6 @@ --- title: "Air-Gapped Upgrade with KubeKey" -keywords: "Air-Gapped, kubernetes, upgrade, kubesphere, v3.1.1" +keywords: "Air-Gapped, kubernetes, upgrade, kubesphere, 3.2.1" description: "Use the offline package to upgrade Kubernetes and KubeSphere." linkTitle: "Air-Gapped Upgrade with KubeKey" weight: 7400 @@ -9,8 +9,9 @@ Air-gapped upgrade with KubeKey is recommended for users whose KubeSphere and Ku ## Prerequisites -- You need to have a KubeSphere cluster running v3.0.0. If your KubeSphere version is v2.1.1 or earlier, upgrade to v3.0.0 first. -- Read [Release Notes for 3.1.1](../../release/release-v311/) carefully. +- You need to have a KubeSphere cluster running v3.1.x. If your KubeSphere version is v3.0.0 or earlier, upgrade to v3.1.x first. +- Your Kubernetes version must be v1.19.x or later. +- Read [Release Notes for 3.2.1](../../release/release-v321/) carefully. - Back up any important component beforehand. - A Docker registry. You need to have a Harbor or other Docker registries. For more information, see [Prepare a Private Image Registry](../../installing-on-linux/introduction/air-gapped-installation/#step-2-prepare-a-private-image-registry). 
- Make sure every node can push and pull images from the Docker Registry. @@ -46,7 +47,7 @@ KubeKey upgrades Kubernetes from one MINOR version to the next MINOR version unt ### Step 1: Download KubeKey -Similar to installing KubeSphere on Linux in an online environment, you need to [download KubeKey v1.1.0](https://github.com/kubesphere/kubekey/releases) first. Download the `tar.gz` file, and transfer it to your local machine which serves as the taskbox for installation. After you uncompress the file, execute the following command to make `kk` executable: +Similar to installing KubeSphere on Linux in an online environment, you need to [download KubeKey v1.2.1](https://github.com/kubesphere/kubekey/releases) first. Download the `tar.gz` file, and transfer it to your local machine which serves as the taskbox for installation. After you uncompress the file, execute the following command to make `kk` executable: ```bash chmod +x kk @@ -59,19 +60,19 @@ As you install KubeSphere and Kubernetes on Linux, you need to prepare an image 1. Download the image list file `images-list.txt` from a machine that has access to Internet through the following command: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} - This file lists images under `##+modulename` based on different modules. You can add your own images to this file following the same rule. To view the complete file, see [Appendix](../../installing-on-linux/introduction/air-gapped-installation/#image-list-of-kubesphere-v310). + This file lists images under `##+modulename` based on different modules. You can add your own images to this file following the same rule. To view the complete file, see [Appendix](../../installing-on-linux/introduction/air-gapped-installation/#image-list-of-kubesphere-v321). {{}} 2. Download `offline-installation-tool.sh`. 
```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. Make the `.sh` file executable. @@ -101,18 +102,18 @@ As you install KubeSphere and Kubernetes on Linux, you need to prepare an image 5. Download the Kubernetes binary file. ```bash - ./offline-installation-tool.sh -b -v v1.17.9 + ./offline-installation-tool.sh -b -v v1.21.5 ``` If you cannot access the object storage service of Google, run the following command instead to add the environment variable to change the source. ```bash - export KKZONE=cn;./offline-installation-tool.sh -b -v v1.17.9 + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.21.5 ``` {{< notice note >}} - - You can change the Kubernetes version downloaded based on your needs. Recommended Kubernetes versions for KubeSphere v3.1.1: v1.17.9, v1.18.8, v1.19.8, v1.20.4 and v1.20.6. If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.19.8 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). + - You can change the Kubernetes version downloaded based on your needs. Recommended Kubernetes versions for KubeSphere 3.2.1 are v1.19.x, v1.20.x, v1.21.x, and v1.22.x (experimental). If you do not specify a Kubernetes version, KubeKey will install Kubernetes v1.21.5 by default. For more information about supported Kubernetes versions, see [Support Matrix](../../installing-on-linux/introduction/kubekey/#support-matrix). - You can upgrade Kubernetes from v1.16.13 to v1.17.9 by downloading the v1.17.9 Kubernetes binary file, but for cross-version upgrades, all intermediate versions need to be downloaded in advance. 
For example, if you want to upgrade Kubernetes from v1.15.12 to v1.18.6, you need to download Kubernetes v1.16.13 and v1.17.9, and the v1.18.6 binary file. @@ -158,8 +159,8 @@ Transfer your packaged image file to your local machine and execute the followin | | Kubernetes | KubeSphere | | ------ | ---------- | ---------- | -| Before | v1.16.13 | v3.0.0 | -| After | v1.17.9 | v3.1.1 | +| Before | v1.18.6 | v3.1.x | +| After | v1.21.5 | 3.2.1 | #### Upgrade a cluster @@ -176,7 +177,7 @@ Execute the following command to generate an example configuration file for inst For example: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -187,7 +188,7 @@ Make sure the Kubernetes version is the one you downloaded. #### Edit the configuration file -Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). {{< notice warning >}} @@ -217,7 +218,7 @@ Set `privateRegistry` of your `config-sample.yaml` file: privateRegistry: dockerhub.kubekey.local ``` -#### Upgrade your single-node cluster to KubeSphere v3.1.1 and Kubernetes v1.17.9 +#### Upgrade your single-node cluster to KubeSphere 3.2.1 and Kubernetes v1.21.5 ```bash ./kk upgrade -f config-sample.yaml @@ -225,10 +226,10 @@ Set `privateRegistry` of your `config-sample.yaml` file: To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. 
Available versions are: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x (experimental) ### Air-gapped upgrade for multi-node clusters @@ -246,8 +247,8 @@ To upgrade Kubernetes to a specific version, explicitly provide the version afte | | Kubernetes | KubeSphere | | ------ | ---------- | ---------- | -| Before | v1.16.13 | v3.0.0 | -| After | v1.17.9 | v3.1.1 | +| Before | v1.18.6 | v3.1.x | +| After | v1.21.5 | 3.2.1 | #### Upgrade a cluster @@ -264,7 +265,7 @@ In this example, KubeSphere is installed on multiple nodes, so you need to speci For example: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -275,7 +276,7 @@ Make sure the Kubernetes version is the one you downloaded. #### Edit the configuration file -Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md). +Edit the configuration file `config-sample.yaml`. Here is [an example for your reference](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md). {{< notice warning >}} @@ -307,7 +308,7 @@ Set `privateRegistry` of your `config-sample.yaml` file: privateRegistry: dockerhub.kubekey.local ``` -#### Upgrade your multi-node cluster to KubeSphere v3.1.1 and Kubernetes v1.17.9 +#### Upgrade your multi-node cluster to KubeSphere 3.2.1 and Kubernetes v1.21.5 ```bash ./kk upgrade -f config-sample.yaml @@ -315,8 +316,7 @@ Set `privateRegistry` of your `config-sample.yaml` file: To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`.
Available versions are: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 - +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x (experimental) diff --git a/content/en/docs/upgrade/overview.md b/content/en/docs/upgrade/overview.md index 5184cfaa7..5af314877 100644 --- a/content/en/docs/upgrade/overview.md +++ b/content/en/docs/upgrade/overview.md @@ -1,18 +1,17 @@ --- title: "Upgrade — Overview" -keywords: "Kubernetes, upgrade, KubeSphere, v3.1.1, upgrade" -description: "Understand what you need to pay attention to before the upgrade, such as versions and upgrade tools." +keywords: "Kubernetes, upgrade, KubeSphere, 3.2.1, upgrade" +description: "Understand what you need to pay attention to before the upgrade, such as versions, and upgrade tools." linkTitle: "Overview" weight: 7100 --- ## Make Your Upgrade Plan -KubeSphere v3.1.1 is compatible with Kubernetes 1.17.x, 1.18.x, 1.19.x and 1.20.x: +KubeSphere 3.2.1 is compatible with Kubernetes 1.19.x, 1.20.x, 1.21.x, and 1.22.x (experimental): -- Before you upgrade your cluster to KubeSphere v3.1.1, you need to have a KubeSphere cluster running v3.0.0. -- If your existing KubeSphere v3.0.0 cluster is installed on Kubernetes 1.17.x+, you can choose to only upgrade KubeSphere to v3.1.1 or upgrade Kubernetes (to a higher version) and KubeSphere (to v3.1.1) at the same time. -- If your existing KubeSphere v3.0.0 cluster is installed on Kubernetes 1.16.x or earlier, you have to upgrade Kubernetes (to 1.17.x+) and KubeSphere (to v3.1.1) at the same time. +- Before you upgrade your cluster to KubeSphere 3.2.1, you need to have a KubeSphere cluster running v3.1.x. +- If your existing KubeSphere v3.1.x cluster is installed on Kubernetes 1.19.x+, you can choose to only upgrade KubeSphere to 3.2.1 or upgrade Kubernetes (to a higher version) and KubeSphere (to 3.2.1) at the same time. 
## Before the Upgrade diff --git a/content/en/docs/upgrade/upgrade-with-ks-installer.md b/content/en/docs/upgrade/upgrade-with-ks-installer.md index 00829cfb9..e02e25135 100644 --- a/content/en/docs/upgrade/upgrade-with-ks-installer.md +++ b/content/en/docs/upgrade/upgrade-with-ks-installer.md @@ -1,6 +1,6 @@ --- title: "Upgrade with ks-installer" -keywords: "Kubernetes, upgrade, KubeSphere, v3.1.1" +keywords: "Kubernetes, upgrade, KubeSphere, 3.2.1" description: "Use ks-installer to upgrade KubeSphere." linkTitle: "Upgrade with ks-installer" weight: 7300 @@ -10,20 +10,20 @@ ks-installer is recommended for users whose Kubernetes clusters were not set up ## Prerequisites -- You need to have a KubeSphere cluster running v3.0.0. If your KubeSphere version is v2.1.1 or earlier, upgrade to v3.0.0 first. -- Read [Release Notes for 3.1.1](../../release/release-v311/) carefully. +- You need to have a KubeSphere cluster running v3.1.x. If your KubeSphere version is v3.0.0 or earlier, upgrade to v3.1.x first. +- Read [Release Notes for 3.2.1](../../release/release-v321/) carefully. - Back up any important component beforehand. -- Supported Kubernetes versions of KubeSphere v3.1.1: v1.17.x, v1.18.x, v1.19.x or v1.20.x. +- Supported Kubernetes versions of KubeSphere 3.2.1: v1.19.x, v1.20.x, v1.21.x, and v1.22.x (experimental). ## Apply ks-installer Run the following command to upgrade your cluster. ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` ## Enable Pluggable Components -You can [enable new pluggable components](../../pluggable-components/overview/) of KubeSphere v3.1.1 after the upgrade to explore more features of the container platform.
+You can [enable new pluggable components](../../pluggable-components/overview/) of KubeSphere 3.2.1 after the upgrade to explore more features of the container platform. diff --git a/content/en/docs/upgrade/upgrade-with-kubekey.md b/content/en/docs/upgrade/upgrade-with-kubekey.md index a0ec7a63d..23a371ba6 100644 --- a/content/en/docs/upgrade/upgrade-with-kubekey.md +++ b/content/en/docs/upgrade/upgrade-with-kubekey.md @@ -1,6 +1,6 @@ --- title: "Upgrade with KubeKey" -keywords: "Kubernetes, upgrade, KubeSphere, v3.1.1, KubeKey" +keywords: "Kubernetes, upgrade, KubeSphere, 3.2.1, KubeKey" description: "Use KubeKey to upgrade Kubernetes and KubeSphere." linkTitle: "Upgrade with KubeKey" weight: 7200 @@ -11,8 +11,8 @@ This tutorial demonstrates how to upgrade your cluster using KubeKey. ## Prerequisites -- You need to have a KubeSphere cluster running v3.0.0. If your KubeSphere version is v2.1.1 or earlier, upgrade to v3.0.0 first. -- Read [Release Notes for 3.1.1](../../release/release-v311/) carefully. +- You need to have a KubeSphere cluster running v3.1.x. If your KubeSphere version is v3.0.0 or earlier, upgrade to v3.1.x first. +- Read [Release Notes for 3.2.1](../../release/release-v321/) carefully. - Back up any important component beforehand. - Make your upgrade plan. Two scenarios are provided in this document for [all-in-one clusters](#all-in-one-cluster) and [multi-node clusters](#multi-node-cluster) respectively. @@ -27,7 +27,7 @@ Follow the steps below to download KubeKey before you upgrade your cluster. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly. 
```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -43,7 +43,7 @@ export KKZONE=cn Run the following command to download KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -58,7 +58,7 @@ After you download KubeKey, if you transfer it to a new machine also with poor n {{< notice note >}} -The commands above download the latest release (v1.1.1) of KubeKey. You can change the version number in the command to download a specific version. +The commands above download the latest release (v1.2.1) of KubeKey. You can change the version number in the command to download a specific version. {{}} @@ -80,18 +80,18 @@ When upgrading Kubernetes, KubeKey will upgrade from one MINOR version to the ne ### All-in-one cluster -Run the following command to use KubeKey to upgrade your single-node cluster to KubeSphere v3.1.1 and Kubernetes v1.20.4: +Run the following command to use KubeKey to upgrade your single-node cluster to KubeSphere 3.2.1 and Kubernetes v1.21.5: ```bash -./kk upgrade --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk upgrade --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x (experimental) ### Multi-node cluster @@ -120,26 +120,26 @@ Edit `sample.yaml` based on your cluster configuration. 
Make sure you replace th {{< notice note >}} -For more information, see [Edit the configuration file](../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file) or refer to the `Cluster` section of [the complete configuration file](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) for more information. +For more information, see [Edit the configuration file](../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file) or refer to the `Cluster` section of [the complete configuration file](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) for more information. {{}} #### Step 3: Upgrade your cluster -The following command upgrades your cluster to KubeSphere v3.1.1 and Kubernetes v1.20.4: +The following command upgrades your cluster to KubeSphere 3.2.1 and Kubernetes v1.21.5: ```bash -./kk upgrade --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 -f sample.yaml +./kk upgrade --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f sample.yaml ``` To upgrade Kubernetes to a specific version, explicitly provide the version after the flag `--with-kubernetes`. Available versions are: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x (experimental) {{< notice note >}} -To use new features of KubeSphere v3.1.1, you may need to enable some pluggable components after the upgrade. +To use new features of KubeSphere 3.2.1, you may need to enable some pluggable components after the upgrade. 
{{}} \ No newline at end of file diff --git a/content/en/docs/upgrade/what-changed.md b/content/en/docs/upgrade/what-changed.md index 64183fb66..7f052c003 100644 --- a/content/en/docs/upgrade/what-changed.md +++ b/content/en/docs/upgrade/what-changed.md @@ -1,13 +1,13 @@ --- title: "Changes after Upgrade" -keywords: "Kubernetes, upgrade, KubeSphere, v3.1.1" +keywords: "Kubernetes, upgrade, KubeSphere, 3.2.1" description: "Understand what will be changed after the upgrade." linkTitle: "Changes after Upgrade" weight: 7600 --- -This section covers the changes after upgrade for existing settings in previous versions. If you want to know all the new features and enhancements in KubeSphere 3.1.1, see [Release Notes for 3.1.1](../../release/release-v311/). +This section covers the changes after upgrade for existing settings in previous versions. If you want to know all the new features and enhancements in KubeSphere 3.2.1, see [Release Notes for 3.2.1](../../release/release-v321/). ## Access Control diff --git a/content/en/docs/workspace-administration/app-repository/import-helm-repository.md b/content/en/docs/workspace-administration/app-repository/import-helm-repository.md index 47cda6647..4e9a017f1 100644 --- a/content/en/docs/workspace-administration/app-repository/import-helm-repository.md +++ b/content/en/docs/workspace-administration/app-repository/import-helm-repository.md @@ -16,18 +16,14 @@ This tutorial demonstrates how to add an app repository to KubeSphere. - You need to enable the [KubeSphere App Store (OpenPitrix)](../../../pluggable-components/app-store/). - You need to have an app repository. Refer to [the official documentation of Helm](https://v2.helm.sh/docs/developing_charts/#the-chart-repository-guide) to create repositories or [upload your own apps to the public repository of KubeSphere](../upload-app-to-public-repository/). Alternatively, use the example repository in the steps below, which is only for demonstration purposes. 
-- You need to create a workspace and a user account (`ws-admin`). The account must be granted the role of `workspace-admin` in the workspace. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/). +- You need to create a workspace and a user (`ws-admin`). The user must be granted the role of `workspace-admin` in the workspace. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). ## Add an App Repository 1. Log in to the web console of KubeSphere as `ws-admin`. In your workspace, go to **App Repositories** under **App Management**, and then click **Add**. - ![app-repo](/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo.png) - 2. In the dialog that appears, specify an app repository name and add your repository URL. For example, enter `https://charts.kubesphere.io/main`. - ![app-info-dialogue](/images/docs/workspace-administration/app-repository/import-helm-repository/app-info-dialogue.png) - - **Name**: Set a simple and clear name for the repository, which is easy for users to identify. - **URL**: Follow the RFC 3986 specification with the following three protocols supported: - S3: The URL is S3-styled, such as `s3..amazonaws.com` for the access to Amazon S3 services using the S3 interface. If you select this type, you need to provide the access key and secret. @@ -39,12 +35,12 @@ If you want to use basic access authentication in HTTP/HTTPS, you can use a URL {{}} + - **Synchronization Interval**: Interval of synchronizing the remote app repository. + - **Description**: Give a brief introduction of main features of the app repository. 3. After you specify required fields, click **Validate** to verify the URL. You will see a green check mark next to the URL if it is available and click **OK** to finish. 
- ![validate-link](/images/docs/workspace-administration/app-repository/import-helm-repository/validate-link.png) - {{< notice note >}} - In an on-premises private cloud environment, you can build your own repository based on [ChartMuseum](https://chartmuseum.com/). Then, you develop and upload applications to the repository and deploy them on KubeSphere for your own needs. @@ -53,6 +49,4 @@ If you want to use basic access authentication in HTTP/HTTPS, you can use a URL {{}} -4. The repository appears in the repository list below after imported and KubeSphere automatically adds all apps in the repository as app templates. When users choose to deploy apps using app templates, they can see apps in this repository. For more information, see [Deploy Apps from App Templates](../../../project-user-guide/application/deploy-app-from-template/). - - ![app-repo-list](/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo-list.png) +4. The repository appears in the repository list after being imported and KubeSphere automatically adds all apps in the repository as app templates. When users choose to deploy apps using app templates, they can see apps in this repository. For more information, see [Deploy Apps from App Templates](../../../project-user-guide/application/deploy-app-from-template/). diff --git a/content/en/docs/workspace-administration/department-management.md b/content/en/docs/workspace-administration/department-management.md index 5981fc796..99b266609 100644 --- a/content/en/docs/workspace-administration/department-management.md +++ b/content/en/docs/workspace-administration/department-management.md @@ -12,21 +12,20 @@ A department in a workspace is a logical unit used for permission control. You c ## Prerequisites -- You need to [create a workspace and an account](../../quick-start/create-workspace-and-project/) assigned the `workspace-admin` role in the workspace.
This document uses the `demo-ws` workspace and the `ws-admin` account as an example. +- You need to [create a workspace and a user](../../quick-start/create-workspace-and-project/) assigned the `workspace-admin` role in the workspace. This document uses the `demo-ws` workspace and the `ws-admin` user as an example. - To set project roles or DevOps project roles in a department, you need to [create at least one project or DevOps project](../../quick-start/create-workspace-and-project/) in the workspace. ## Create a Department 1. Log in to the KubeSphere web console as `ws-admin` and go to the `demo-ws` workspace. -2. On the left navigation bar, choose **Department Management** under **Workspace Settings**, and click **Set Department** on the right. +2. On the left navigation bar, choose **Department Management** under **Workspace Settings**, and click **Set Departments** on the right. -3. In the **Set Department** dialog box, set the following fields and click **OK** to create a department. +3. In the **Set Departments** dialog box, set the following parameters and click **OK** to create a department. {{< notice note >}} * If a department has already been created in the workspace, you can click **Create Department** to add more departments to the workspace. - * You can create multiple departments and multiple sub-departments in each department. To create a subdepartment, select a department on the left department tree and click **Create Department** on the right. {{}} @@ -37,13 +36,13 @@ A department in a workspace is a logical unit used for permission control. You c * **Project Role**: Role of all department members in a project. You can click **Add Project** to specify multiple project roles. Only one role can be specified for each project. * **DevOps Project Role**: Role of all department members in a DevOps project. You can click **Add DevOps Project** to specify multiple DevOps project roles. Only one role can be specified for each DevOps project. -4.
Click **Close** after the department is created. On the **Department Management** page, the created department is displayed in a department tree on the left. +4. Click **OK** after the department is created, and then click **Close**. On the **Department Management** page, the created department is displayed in a department tree on the left. ## Assign a User to a Department -1. On the **Department Management** page, select a department in the department tree on the left and click **Unassigned** on the right. +1. On the **Department Management** page, select a department in the department tree on the left and click **Not Assigned** on the right. -2. In the unassigned user list, click on the right of a user, and click **OK** for the displayed message to assign the user to the department. +2. In the user list, click on the right of a user, and click **OK** for the displayed message to assign the user to the department. {{< notice note >}} @@ -59,9 +58,9 @@ A department in a workspace is a logical unit used for permission control. You c ## Delete and Edit a Department -1. On the **Department Management** page, click **Set Department**. +1. On the **Department Management** page, click **Set Departments**. -2. In the **Set Department** dialog box, on the left, click the upper level of the department to be edited or deleted. +2. In the **Set Departments** dialog box, on the left, click the upper level of the department to be edited or deleted. 3. Click on the right of the department to edit it. 
diff --git a/content/en/docs/workspace-administration/project-quotas.md b/content/en/docs/workspace-administration/project-quotas.md index 9c4503af5..ad59de15f 100644 --- a/content/en/docs/workspace-administration/project-quotas.md +++ b/content/en/docs/workspace-administration/project-quotas.md @@ -6,52 +6,46 @@ linkTitle: "Project Quotas" weight: 9600 --- -KubeSphere uses [Kubernetes requests and limits](https://kubesphere.io/blogs/understand-requests-and-limits-in-kubernetes/) to control resource (for example, CPU and memory) usage in a project, also known as [ResourceQuotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/) in Kubernetes. Requests make sure a project can get the resources it needs as they are specifically guaranteed and reserved. On the contrary, limits ensure that a project can never use resources above a certain value. +KubeSphere uses [Kubernetes requests and limits](https://kubesphere.io/blogs/understand-requests-and-limits-in-kubernetes/) to control resource (for example, CPU and memory) usage in a project, also known as [resource quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/) in Kubernetes. Requests make sure a project can get the resources it needs as they are specifically guaranteed and reserved. On the contrary, limits ensure that a project can never use resources above a certain value. -Besides CPU and memory, you can also set resource quotas for other objects separately such as Pods, [Deployments](../../project-user-guide/application-workloads/deployments/), [Jobs](../../project-user-guide/application-workloads/jobs/), [Services](../../project-user-guide/application-workloads/services/) and [ConfigMaps](../../project-user-guide/configuration/configmaps/) in a project. 
+Besides CPU and memory, you can also set resource quotas for other objects separately such as Pods, [Deployments](../../project-user-guide/application-workloads/deployments/), [Jobs](../../project-user-guide/application-workloads/jobs/), [Services](../../project-user-guide/application-workloads/services/), and [ConfigMaps](../../project-user-guide/configuration/configmaps/) in a project. This tutorial demonstrates how to configure quotas for a project. ## Prerequisites -You have an available workspace, a project and an account (`ws-admin`). The account must have the `admin` role at the workspace level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +You have an available workspace, a project and a user (`ws-admin`). The user must have the `admin` role at the workspace level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). {{< notice note >}} -If you use the account `project-admin` (an account of the `admin` role at the project level), you can set project quotas as well for a new project (i.e. its quotas remain unset). However, `project-admin` cannot change project quotas once they are set. Generally, it is the responsibility of `ws-admin` to set limits and requests for a project. `project-admin` is responsible for [setting limit ranges](../../project-administration/container-limit-ranges/) for containers in a project. +If you use the user `project-admin` (a user of the `admin` role at the project level), you can set project quotas as well for a new project (i.e. its quotas remain unset). However, `project-admin` cannot change project quotas once they are set. Generally, it is the responsibility of `ws-admin` to set limits and requests for a project. `project-admin` is responsible for [setting limit ranges](../../project-administration/container-limit-ranges/) for containers in a project. 
{{}} ## Set Project Quotas -1. Log in to the console as `ws-admin` and go to a project. On the **Overview** page, you can see project quotas remain unset if the project is newly created. Click **Set** to configure quotas. +1. Log in to the console as `ws-admin` and go to a project. On the **Overview** page, you can see project quotas remain unset if the project is newly created. Click **Edit Quotas** to configure quotas. - ![project-quotas](/images/docs/workspace-administration/project-quotas/project-quotas.png) - -2. In the dialog that appears, you can see that KubeSphere does not set any requests or limits for a project by default. To set +2. In the displayed dialog box, you can see that KubeSphere does not set any requests or limits for a project by default. To set limits to control CPU and memory resources, use the slider to move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. - ![set-project-quotas](/images/docs/workspace-administration/project-quotas/set-project-quotas.png) - {{< notice note >}} The limit can never be lower than the request. {{}} -3. To set quotas for other resources, click **Add Quota Item** and select an object from the list. - - ![set-other-resouce-quotas](/images/docs/workspace-administration/project-quotas/set-other-resouce-quotas.png) +3. To set quotas for other resources, click **Add** under **Project Resource Quotas**, and then select a resource or enter a resource name and set a quota. 4. Click **OK** to finish setting quotas. 5. Go to **Basic Information** in **Project Settings**, and you can see all resource quotas for the project. -6. To change project quotas, click **Manage Project** on the **Basic Information** page and select **Edit Quota**. +6. To change project quotas, click **Edit Project** on the **Basic Information** page and select **Edit Project Quotas**.
{{< notice note >}} - For [a multi-cluster project](../../project-administration/project-and-multicluster-project/#multi-cluster-projects), the option **Edit Quota** does not display in the **Manage Project** drop-down menu. To set quotas for a multi-cluster project, go to **Quota Management** under **Project Settings** and click **Edit Quota**. Note that as a multi-cluster project runs across clusters, you can set resource quotas on different clusters separately. + For [a multi-cluster project](../../project-administration/project-and-multicluster-project/#multi-cluster-projects), the option **Edit Project Quotas** does not display in the **Manage Project** drop-down menu. To set quotas for a multi-cluster project, go to **Projects Quotas** under **Project Settings** and click **Edit Quotas**. Note that as a multi-cluster project runs across clusters, you can set resource quotas on different clusters separately. {{}} diff --git a/content/en/docs/workspace-administration/role-and-member-management.md b/content/en/docs/workspace-administration/role-and-member-management.md index 084de5a32..b6c2bab04 100644 --- a/content/en/docs/workspace-administration/role-and-member-management.md +++ b/content/en/docs/workspace-administration/role-and-member-management.md @@ -6,17 +6,11 @@ linkTitle: "Workspace Role and Member Management" weight: 9400 --- -This tutorial demonstrates how to manage roles and members in a workspace. At the workspace level, you can grant permissions in the following modules to a role: - -- **Project Management** -- **DevOps Project Management** -- **App Management** -- **Access Control** -- **Workspace Settings** +This tutorial demonstrates how to manage roles and members in a workspace. ## Prerequisites -At least one workspace has been created, such as `demo-workspace`. Besides, you need an account of the `workspace-admin` role (for example, `ws-admin`) at the workspace level. 
For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +At least one workspace has been created, such as `demo-workspace`. Besides, you need a user of the `workspace-admin` role (for example, `ws-admin`) at the workspace level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). {{< notice note >}} @@ -26,20 +20,18 @@ The actual role name follows a naming convention: `workspace name-role name`. Fo ## Built-in Roles -In **Workspace Roles**, there are four available built-in roles as shown below. Built-in roles are created automatically by KubeSphere when a workspace is created and they cannot be edited or deleted. You can only view permissions included in a built-in role or assign it to a user. +In **Workspace Roles**, there are four available built-in roles. Built-in roles are created automatically by KubeSphere when a workspace is created and they cannot be edited or deleted. You can only view permissions included in a built-in role or assign it to a user. | Built-in Roles | Description | | ------------------ | ------------------------------------------------------------ | -| `workspace-viewer` | The viewer in the workspace who can view all resources in the workspace. | -| `workspace-self-provisioner` | The regular user in the workspace who can create projects and DevOps projects. | -| `workspace-regular` | The regular user in the workspace who cannot create projects or DevOps projects. | -| `workspace-admin` | The administrator in the workspace who can perform any action on any resource. It gives full control over all resources in the workspace. | +| `workspace-viewer` | Workspace viewer who can view all resources in the workspace. | +| `workspace-self-provisioner` | Workspace regular member who can view workspace settings, manage app templates, and create projects and DevOps projects. 
| +| `workspace-regular` | Workspace regular member who can view workspace settings. | +| `workspace-admin` | Workspace administrator who has full control over all resources in the workspace. | To view the permissions that a role contains: -1. Log in to the console as `ws-admin`. In **Workspace Roles**, click a role (for example, `workspace-admin`) and you can see role details as shown below. - - ![role-permissions](/images/docs/workspace-administration/role-and-member-management/role-permissions.png) +1. Log in to the console as `ws-admin`. In **Workspace Roles**, click a role (for example, `workspace-admin`) and you can see role details. 2. Click the **Authorized Users** tab to see all the users that are granted the role. @@ -57,20 +49,13 @@ To view the permissions that a role contains: {{}} -4. Newly-created roles will be listed in **Workspace Roles**. To edit an existing role, click on the right. - - ![role-list](/images/docs/workspace-administration/role-and-member-management/role-list.png) +4. Newly-created roles will be listed in **Workspace Roles**. To edit the information or permissions, or delete an existing role, click on the right. ## Invite a New Member -1. Navigate to **Workspace Members** under **Workspace Settings**, and click **Invite Member**. +1. Navigate to **Workspace Members** under **Workspace Settings**, and click **Invite**. 2. Invite a user to the workspace by clicking on the right of it and assign a role to it. - - 3. After you add the user to the workspace, click **OK**. In **Workspace Members**, you can see the user in the list. -4. To edit the role of an existing user or remove the user from the workspace, click on the right and select the corresponding operation. - - ![edit-existing-user](/images/docs/workspace-administration/role-and-member-management/edit-existing-user.png) - +4. To edit the role of an existing user or remove the user from the workspace, click on the right and select the corresponding operation. 
\ No newline at end of file diff --git a/content/en/docs/workspace-administration/upload-helm-based-application.md b/content/en/docs/workspace-administration/upload-helm-based-application.md index 685daf91a..1a2236f91 100644 --- a/content/en/docs/workspace-administration/upload-helm-based-application.md +++ b/content/en/docs/workspace-administration/upload-helm-based-application.md @@ -13,25 +13,17 @@ This tutorial demonstrates how to develop an app template by uploading a package ## Prerequisites - You need to enable the [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/). -- You need to create a workspace and a user account (`project-admin`). The account must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +- You need to create a workspace and a user (`project-admin`). The user must be invited to the workspace with the role of `workspace-self-provisioner`. For more information, refer to [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Hands-on Lab -1. Log in to KubeSphere as `project-admin`. In your workspace, go to **App Templates** under **App Management**, and click **Upload Template**. +1. Log in to KubeSphere as `project-admin`. In your workspace, go to **App Templates** under **App Management**, and click **Create**. - ![upload-app-template](/images/docs/workspace-administration/upload-helm-based-application/upload-app-template.png) - -2. In the dialog that appears, click **Upload Helm Chart Package**. You can upload your own Helm chart or download the [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) and use it as an example for the following steps. - - ![upload-helm](/images/docs/workspace-administration/upload-helm-based-application/upload-helm.png) +2. In the dialog that appears, click **Upload**. 
You can upload your own Helm chart or download the [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) and use it as an example for the following steps. 3. After the package is uploaded, click **OK** to continue. - ![confirm-upload](/images/docs/workspace-administration/upload-helm-based-application/confirm-upload.png) - -4. You can view the basic information of the app under **App Information**. To upload an icon for the app, click **Upload icon**. You can also skip it and click **OK** directly. - - ![upload-icon](/images/docs/workspace-administration/upload-helm-based-application/upload-icon.png) +4. You can view the basic information of the app under **App Information**. To upload an icon for the app, click **Upload Icon**. You can also skip it and click **OK** directly. {{< notice note >}} @@ -39,12 +31,8 @@ Maximum accepted resolutions of the app icon: 96 x 96 pixels. {{}} -5. The app appears in the template list with the status **Draft** after successfully uploaded, which means this app is under development. The uploaded app is visible to all members in the same workspace. +5. The app appears in the template list with the status **Developing** after successfully uploaded, which means this app is under development. The uploaded app is visible to all members in the same workspace. - ![draft-app](/images/docs/workspace-administration/upload-helm-based-application/draft-app.png) - -6. Click the app and the page opens with the **Versions** tab selected. Click the draft version to expand the menu, where you can see options including **Delete Version**, **Test Deployment**, and **Submit for Review**. - - ![version-page](/images/docs/workspace-administration/upload-helm-based-application/version-page.png) +6. Click the app and the page opens with the **Versions** tab selected. Click the draft version to expand the menu, where you can see options including **Delete**, **Install**, and **Submit for Release**. 7. 
For more information about how to release your app to the App Store, refer to [Application Lifecycle Management](../../application-store/app-lifecycle-management/#step-2-upload-and-submit-application). diff --git a/content/en/docs/workspace-administration/what-is-workspace.md b/content/en/docs/workspace-administration/what-is-workspace.md index 4dd4770d8..d110c0079 100644 --- a/content/en/docs/workspace-administration/what-is-workspace.md +++ b/content/en/docs/workspace-administration/what-is-workspace.md @@ -15,13 +15,11 @@ This tutorial demonstrates how to create and delete a workspace. ## Prerequisites -You have an account granted the role of `workspaces-manager`, such as `ws-manager` in [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +You have a user granted the role of `workspaces-manager`, such as `ws-manager` in [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Create a Workspace -1. Log in to the web console of KubeSphere as `ws-manager`. On the **Workspaces** page, you can see all workspaces on the platform. Click **Create**. - - ![workspaces-list](/images/docs/workspace-administration/workspace-overview/workspaces-list.png) +1. Log in to the web console of KubeSphere as `ws-manager`. Click **Platform** on the upper-left corner, and then select **Access Control**. On the **Workspaces** page, click **Create**. {{< notice note >}} @@ -29,22 +27,18 @@ You have an account granted the role of `workspaces-manager`, such as `ws-manage {{}} -2. On the **Basic Information** page, specify a name for the workspace and select an administrator from the drop-down list. Click **Create** to continue. - - ![set-workspace-info](/images/docs/workspace-administration/workspace-overview/set-workspace-info.png) +2. For single-node cluster, on the **Basic Information** page, specify a name for the workspace and select an administrator from the drop-down list. Click **Create**. 
- **Name**: Set a name for the workspace which serves as a unique identifier. - **Alias**: An alias name for the workspace. - - **Administrator**: Account that administers the workspace. + - **Administrator**: User that administers the workspace. - **Description**: A brief introduction of the workspace. -3. The workspace created appears in the list as shown below. + For multi-node cluster, after the basic information about the workspace is set, click **Next** to continue. On the **Cluster Settings** page, select clusters to be used in the workspace, and then click **Create**. - ![workspace-created](/images/docs/workspace-administration/workspace-overview/workspace-created.png) +3. The workspace is displayed in the workspace list after it is created. -4. Click the workspace and you can see resource status in the workspace on the **Overview** page. - - ![workspace-overview](/images/docs/workspace-administration/workspace-overview/workspace-overview.png) +4. Click the workspace and you can see resource status of the workspace on the **Overview** page. ## Delete a Workspace @@ -78,15 +72,13 @@ Be extremely cautious about deleting a workspace if you use kubectl to delete wo 1. In your workspace, go to **Basic Information** under **Workspace Settings**. On the **Basic Information** page, you can see the general information of the workspace, such as the number of projects and members. - ![workspace-basic-information](/images/docs/workspace-administration/workspace-overview/workspace-basic-information.png) - {{< notice note >}} On this page, you can click **Edit Information** to change the basic information of the workspace (excluding the workspace name) and turn on/off [Network Isolation](../../workspace-administration/workspace-network-isolation/). {{}} -2. To delete the workspace, check **Delete Workspace** and click **Delete**. +2. To delete the workspace, click **Delete** under **Delete Workspace**. 
In the displayed dialog box, enter the name of the workspace, and then click **OK**. {{< notice warning >}} diff --git a/content/en/docs/workspace-administration/workspace-network-isolation.md b/content/en/docs/workspace-administration/workspace-network-isolation.md index 13245136d..8bc7582da 100644 --- a/content/en/docs/workspace-administration/workspace-network-isolation.md +++ b/content/en/docs/workspace-administration/workspace-network-isolation.md @@ -10,7 +10,7 @@ weight: 9500 - You have already enabled [Network Policies](../../pluggable-components/network-policy/). -- Use an account of the `workspace-admin` role. For example, use the account `ws-admin` created in [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +- Use a user of the `workspace-admin` role. For example, use the `ws-admin` user created in [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). {{< notice note >}} @@ -22,8 +22,6 @@ weight: 9500 Workspace network isolation is disabled by default. You can turn on network isolation in **Basic Information** under **Workspace Settings**. -![workspace-isolation](/images/docs/workspace-administration/workspace-network-isolation/workspace-isolation.png) - {{< notice note >}} When network isolation is turned on, egress traffic will be allowed by default, while ingress traffic will be denied for different workspaces. If you need to customize your network policy, you need to turn on [Project Network Isolation](../../project-administration/project-network-isolation/) and add a network policy in **Project Settings**. 
diff --git a/content/en/docs/workspace-administration/workspace-quotas.md b/content/en/docs/workspace-administration/workspace-quotas.md index 24725c3e9..3a55c4bd2 100644 --- a/content/en/docs/workspace-administration/workspace-quotas.md +++ b/content/en/docs/workspace-administration/workspace-quotas.md @@ -14,19 +14,17 @@ This tutorial demonstrates how to manage resource quotas for a workspace. ## Prerequisites -You have an available workspace and an account (`ws-manager`). The account must have the `workspaces-manager` role at the platform level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/). +You have an available workspace and a user (`ws-manager`). The user must have the `workspaces-manager` role at the platform level. For more information, see [Create Workspaces, Projects, Users and Roles](../../quick-start/create-workspace-and-project/). ## Set Workspace Quotas 1. Log in to the KubeSphere web console as `ws-manager` and go to a workspace. -2. Navigate to **Quota Management** under **Workspace Settings**. +2. Navigate to **Workspace Quotas** under **Workspace Settings**. -3. The **Quota Management** page lists all the available clusters assigned to the workspace and their respective requests and limits of CPU and memory. Click **Edit Quota** on the right of a cluster. +3. The **Workspace Quotas** page lists all the available clusters assigned to the workspace and their respective requests and limits of CPU and memory. Click **Edit Quotas** on the right of a cluster. -4. In the dialog that appears, you can see that KubeSphere does not set any requests or limits for the workspace by default. To set requests and limits to control CPU and memory resources, use the slider to move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. 
- - ![edit-workspace-quotas](/images/docs/workspace-administration/workspace-quotas/edit-workspace-quotas.png) +4. In the displayed dialog box, you can see that KubeSphere does not set any requests or limits for the workspace by default. To set requests and limits to control CPU and memory resources, move to a desired value or enter numbers directly. Leaving a field blank means you do not set any requests or limits. {{< notice note >}} diff --git a/content/en/news/_index.md b/content/en/news/_index.md index 7e3f8ba08..e27d76f4f 100644 --- a/content/en/news/_index.md +++ b/content/en/news/_index.md @@ -10,6 +10,14 @@ section1: section2: news: + - title: 'KubeSphere Team will join the KubeCon China and bring 5 sessions' + description: KubeSphere Team brings 5 sessions to KubeCon China 2021 + image: /images/news/kubecon-china-2021/banner.png + link: 'kubecon-china-2021/' + - title: 'Kubernetes Community Days China 2021 Recap' + description: The first Kubernetes Community Days China was successfully held in Beijing + image: /images/news/kcd-china/kcd-china-event.png + link: 'kubernetes-community-days-china/' - title: 'Announcing KubeSphere 3.1.0 on AWS Quick Start!' description: KubeSphere Quick Start uses AWS CloudFormation templates to help users automatically provision an Amazon EKS cluster on the AWS Cloud. End users can manage Amazon EKS clusters through the KubeSphere console.
image: /images/news/aws-quick-start/quick-start-cover.png diff --git a/content/en/news/kubecon-china-2021.md b/content/en/news/kubecon-china-2021.md new file mode 100644 index 000000000..8b01623b2 --- /dev/null +++ b/content/en/news/kubecon-china-2021.md @@ -0,0 +1,80 @@ +--- +title: 'KubeSphere Team will join the KubeCon China and bring 5 sessions' +keywords: KubeCon, Meetup, Kubernetes +description: KubeSphere brings 4 sessions and participates in 1 office hour in KubeCon China 2021 +createTime: '2021-12-09' +author: 'Feynman, Lindsay' +--- + +Every year, the Cloud-Native Computing Foundation organizes its flagship conference KubeCon and CloudNativeCon, which gathers DevOps, SRE, developers, and technologists to meet leading open source and Cloud-Native communities. This year, KubeCon and CloudNativeCon China are coming! KubeSphere Team will bring 4 sessions and participate in 1 office hour in this conference. You can join virtually from 9-10 December 2021. + +## Session 1: Kubernetes Multi-cluster and Multi-tenancy Management with RBAC and KubeFed + +## Abstract + +Soft multi-tenancy is a form of multi-tenancy that does not have strict isolation of the different users, workloads, or applications. When it comes to Kubernetes, soft multi-tenancy is usually isolated by RBAC and namespaces. There are many challenges when cluster administrators implement multi-tenancy across multiple Kubernetes clusters, such as authentication and authorization, resource quota, network policy, security policy, etc. + +In this talk, KubeSphere maintainers will share their experience and best practice in designing the multi-tenancy architecture: how to manage users and authentication across multiple clusters, how to manage resource quotas for tenants in different clusters, the resource isolation mechanism, and how to authorize resources across multiple clusters.
+ +## Speaker +![wan-hongming](/images/news/kubecon-china-2021/wan-hongming.png) +Hongming Wan - Senior Software Engineer, QingCloud Technologies. + +Hongming is the core contributor of KubeSphere, and leads the KubeSphere Multi-tenancy and Security team. He focuses on open source and cloud-native security areas. + +## Session 2: Ship Apps to Multi-cluster Environments from an App-centric Abstraction + +## Abstract + +Many application definitions and frameworks are emerging from the CNCF landscape. Helm Chart and Operator are the most popular ways to package and manage applications in the Kubernetes ecosystem. From the CNCF Survey 2020, the enterprise architecture represented by multi-cluster and multi-cloud has been a new trend in modern infrastructure. How can we leverage the app-centric concepts to provide self-service to deliver/deploy applications across multiple Kubernetes clusters and clouds? KubeSphere Team is building a unified control plane to enable users to deliver applications and cloud functions with a consistent workflow. In this talk, KubeSphere maintainers will talk about: + +- Uncomplicating the Helm Chart and Operator deployment using CRD +- How to propagate a cloud native application across multiple clouds +- How to manage Operator and its CRD across multiple clouds +- How to extend your operator in an elegant interface + +## Speaker +![lai-zhengyi](/images/news/kubecon-china-2021/lai-zhengyi.png) +Zhengyi Lai - KubeSphere Dev Lead, QingCloud Technologies + +Zhengyi Lai is the maintainer of the KubeSphere. He has contributed to helm, virtual-kubelet, grpc-gateway, etc. Zhengyi is also maintaining the application store, network, and pluggable architecture in KubeSphere. His main work focuses on networking, multi-clustering, application delivery and cloud-native technologies such as Artifact Hub. 
+ +## Session 3: Build a modern FaaS platform with Cloud Native Serverless technologies + +## Abstract + +As the core of Serverless, FaaS (Function-as-a-Service) has gained more and more attention. The emerging cloud native serverless technologies make it possible to build a robust modern FaaS platform by replacing the key components of a FaaS platform with more powerful cloud native alternatives. In this talk, OpenFunction maintainers will talk about: + +- The key components that make a FaaS platform, including function framework, function build, function serving, and function event management. +- The advantage of the emerging cloud native serverless technologies in each of the key areas of FaaS including Knative Serving, Cloud Native Buildpacks, Shipwright, Tekton, KEDA, and Dapr. +- How to build a powerful modern FaaS platform with these cloud native technologies by the example of OpenFunction +- Why does event management matter for FaaS? +- Why does OpenFunction create its own event management system "OpenFunction Events" when there are already Knative eventing and Argo Events? + +## Speaker +![huo-binjie](/images/news/kubecon-china-2021/huo-binjie.png) +Benjamin Huo - Founder of OpenFunction + +Benjamin Huo led the KubeSphere Observability and Serverless team. He is the creator of FluentBit Operator and the founder of the FaaS project [OpenFunction](https://github.com/OpenFunction/OpenFunction), also the author and architect of several observability open source projects such as Kube-Events, Notification Manager, etc. He loves cloud-native and open source technologies and is the contributor of Prometheus. + +![lei-wanjun](/images/news/kubecon-china-2021/lei-wanjun.png) +Wanjun Lei - KubeSphere Dev Lead, QingCloud Technologies. + +Wanjun Lei is the maintainer of OpenFunction and is responsible for developing OpenFunction.
He is also the maintainer of FluentBit Operator, and a member of the KubeSphere Observability team, where he is responsible for the development of Notification Manager. He loves cloud native and open source technologies, and is a contributor to fluent bit and nats. + +## Session 4: KubeSphere use case sharing in the OpenEBS Office Hours + +![openebs-office-hours](/images/news/kubecon-china-2021/openebs-office-hours.png) +In cloud-based Kubernetes clusters, persistent storage services are usually provided by cloud providers. When enterprises build an on-premise Kubernetes platform or adopt a Kubernetes distribution in production, persistent storage is the biggest challenge. OpenEBS automates the management of storage attached to the Kubernetes worker nodes and allows the storage to be used for dynamically provisioning OpenEBS PVs or Local PVs. KubeSphere is an open source container platform built on Kubernetes, and it integrates OpenEBS as the default persistent storage to provide out-of-the-box persistent storage services for users. + +In this talk, Feynman Zhou and Stone Shi from KubeSphere Team will introduce how they leverage OpenEBS and Kubernetes to build a container platform and run stateful workloads on it. + + +## Session 5: AWS invited KubeSphere Product Manager to speak at the interview + +![aws-interview](/images/news/kubecon-china-2021/aws-interview.png) + + +## RSVP + +You can register for the KubeCon and CloudNativeCon 2021 via this [link](https://www.lfasiallc.com/kubecon-cloudnativecon-open-source-summit-china/register/), and join us virtually from 9-10 December 2021.
\ No newline at end of file diff --git a/content/en/news/kubernetes-community-days-china.md b/content/en/news/kubernetes-community-days-china.md new file mode 100644 index 000000000..5dd11074b --- /dev/null +++ b/content/en/news/kubernetes-community-days-china.md @@ -0,0 +1,43 @@ +--- +title: "Kubernetes Community Days China Recap" +keywords: " Community, Kubernetes" +description: "The first Kubernetes Community Days China was successfully held in Beijing" +createTime: "2021-11-02" +author: "Lindsay" +--- + +## Kubernetes Community Days China + +![kcd-china-event](/images/news/kcd-china/kcd-china-event.png) + + [Kubernetes Community Days (KCD)](https://community.cncf.io/kubernetes-community-days/about-kcd/) was initiated by the Cloud Native Computing Foundation (CNCF) and is jointly organized by local CNCF ambassadors, CNCF employees, and CNCF members across the world. KCD is currently being actively organized worldwide, gathering end users, contributors, and technical experts from the open-source community in the cloud-native field. This series of localized activities will help grow the Kubernetes Community and spread cloud-native technology more widely among end-users in different industries. + +Cloud-native technologies and Kubernetes have become more and more popular in China. According to Jim Zemlin, executive director of the Linux Foundation, China's contribution to the Kubernetes community ranks second in the world, next only to the United States. In the "[CNCF China Cloud Native Survey 2020](https://www.cncf.io/blog/2021/04/28/cncf-cloud-native-survey-china-2020/)", it is also mentioned that the proportion of Chinese companies using Kubernetes in the production environment has reached as high as 82%, and Chinese teams have contributed 13 open-source projects. 
+ +## Event Organizers + +Collaborating with CNCF ambassadors from PingCAP, DaoCloud, Huawei Cloud, KubeSphere, and Cloud Native Community, CNCF hosted the first Kubernetes Community Days (KCD) in Beijing, focusing on open-source projects and technical practices in the cloud-native ecosystem. + +![kcd-beijing-organizers](/images/news/kcd-china/kcd-beijing-organizers.png) + +## Event at a Glance + +KCD Beijing set up 4 keynote speeches, 4 lightning talks and 1 panel session, in which CTOs and architects from many well-known companies such as Microsoft Azure, NGINX Community, Tencent, DaoCloud, VMware, Xinglan Technology, VIPKID, and maintainers of the open-source project bring a lot of inspiring ideas. + +![guest-speakers](/images/news/kcd-china/guest-speakers.png) + +On the day of the event, nearly 300 people signed up, and more than 150 people participated at the Microsoft Beijing office. Six online platforms provided real-time live broadcasts, attracting more than 5,000 people to watch. + + + +![kcd-event](/images/news/kcd-china/kcd-event.png) + +![kcd-event-2](/images/news/kcd-china/kcd-event-2.png) + +![kcd-event-3](/images/news/kcd-china/kcd-event-3.png) + + + +All the decks shared by speakers have been uploaded to CNCF/Presentations. Feel free to download or comment! + +Download: https://github.com/cncf/presentations/tree/master/chinese/kcd-china \ No newline at end of file diff --git a/content/en/privacy/_index.md index 35aefb475..642765013 100644 --- a/content/en/privacy/_index.md +++ b/content/en/privacy/_index.md @@ -61,7 +61,7 @@ css: "scss/private.scss" For the purpose of the GDPR, Service Providers are considered Data Processors.

  • -

    Third-party Social Media Service refers to any website or any social network website through which a User can log in or create an account to use the Service.

    +

    Third-party Social Media Service refers to any website or any social network website through which a User can log in or create a user to use the Service.

  • Usage Data refers to data collected automatically, either generated by the use of the Service or from the Service infrastructure itself (for example, the duration of a page visit).

    diff --git a/content/tr/conferences/admin-quick-start.md index 09b4dd913..3bb7286c1 100644 --- a/content/tr/conferences/admin-quick-start.md +++ b/content/tr/conferences/admin-quick-start.md @@ -31,7 +31,7 @@ The role of cluster-admin is able to create accounts for other users and assign #### Step 1: Create roles and accounts -First, we will create a new role (user-manager), grants account management and role management authority to this role, then we will create an account and grant the user-manager role to this account. +First, we will create a new role (user-manager), grant account management and role management authority to this role, then we will create a user and grant the user-manager role to this account. | Account Name | Cluster Role | Responsibility | | ------------ | ------------ | --------------------------------- | @@ -47,7 +47,7 @@ First, we will create a new role (user-manager), grants account management and r ![](https://pek3b.qingstor.com/kubesphere-docs/png/20190716112826.png) -1.4. Click **Platform**, then navigate to **Accounts** page and click **Create** to create an account. +1.4. Click **Platform**, then navigate to **Accounts** page and click **Create** to create a user. 
![](https://pek3b.qingstor.com/kubesphere-docs/png/20190716112945.png) diff --git a/content/zh/_index.md b/content/zh/_index.md index 3bae4d514..e6f882cde 100644 --- a/content/zh/_index.md +++ b/content/zh/_index.md @@ -90,7 +90,7 @@ section4: - name: 支持多种存储与网络方案 icon: /images/home/multi-tenant-management.svg - content: 支持 GlusterFS、Ceph、NFS、LocalPV,提供多个 CSI 插件对接公有云与企业级存储;提供面向物理机 Kubernetes 环境的负载均衡器 Porter,支持网络策略可视化,支持 Calico、Flannel、Cilium、Kube-OVN 等网络插件 + content: 支持 GlusterFS、Ceph、NFS、LocalPV,提供多个 CSI 插件对接公有云与企业级存储;提供面向物理机 Kubernetes 环境的负载均衡器 OpenELB,支持网络策略可视化,支持 Calico、Flannel、Cilium、Kube-OVN 等网络插件 features: - name: Kubernetes DevOps 系统 diff --git a/content/zh/blogs/DevOps-pipeline-remove-Docker-dependencies.md b/content/zh/blogs/DevOps-pipeline-remove-Docker-dependencies.md index e2a63893f..fd3694f1f 100644 --- a/content/zh/blogs/DevOps-pipeline-remove-Docker-dependencies.md +++ b/content/zh/blogs/DevOps-pipeline-remove-Docker-dependencies.md @@ -61,7 +61,7 @@ containerd github.com/containerd/containerd v1.4.3 269548fa27e0089a8b8278fc4 这里主要用于测试,因此没有将 Podman 安装到基础镜像中,而是在流水线中实时安装。生产环境,应该提前安装,以加快执行速度。 -以 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 为例,流水线中主要需要增加如下部分: +以 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 为例,流水线中主要需要增加如下部分: ```groovy stage ('install podman') { @@ -78,11 +78,11 @@ containerd github.com/containerd/containerd v1.4.3 269548fa27e0089a8b8278fc4 } ``` -相关脚本,已经更新到 [Podman](https://github.com/kubesphere/devops-java-sample/tree/podman) 分支中。 +相关脚本,已经更新到 [Podman](https://github.com/kubesphere/devops-maven-sample/tree/podman) 分支中。 -## 测试 devops-java-sample 项目 +## 测试 devops-maven-sample 项目 -使用 devops-java-sample 创建 SCM 流水线,Jenkinsfile 路径设置为 Jenkinsfile-online,并配置好相关的秘钥值。 +使用 devops-maven-sample 创建 SCM 流水线,Jenkinsfile 路径设置为 Jenkinsfile-online,并配置好相关的秘钥值。 最后执行时,在 Podman 分支上可以看到如下日志: diff --git a/content/zh/blogs/Kubernetes-multicluster-KubeSphere.md 
b/content/zh/blogs/Kubernetes-multicluster-KubeSphere.md index 270206cf4..42e1b0d59 100644 --- a/content/zh/blogs/Kubernetes-multicluster-KubeSphere.md +++ b/content/zh/blogs/Kubernetes-multicluster-KubeSphere.md @@ -1,10 +1,10 @@ --- title: '混合云下的 Kubernetes 多集群管理与应用部署' tag: 'KubeSphere, Kubernetes, 多集群管理' -keywords: 'KKubeSphere, Kubernetes, 多集群管理, Kubefed' +keywords: 'KubeSphere, Kubernetes, 多集群管理, Kubefed' description: '本文介绍了 Kubernetes 社区多集群方向的发展历程以及已有的多集群解决方案,分享在混合云的场景下, KubeSphere 如何基于 Kubefed 统一应用的分发与部署,以达到跨 region 的多活/容灾等目的。同时探讨未来多集群领域可能迈向的去中心化的架构。' createTime: '2021-05-26' -author: ' 李宇' +author: '李宇' snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/Kubernetes-multicluster-KubeSphere-banner.jpg' --- @@ -68,7 +68,7 @@ Kubernetes 内部分为 Master 和 Worker 两个角色。Master 上面有 API Se ![](https://pek3b.qingstor.com/kubesphere-community/images/联邦资源.png) 当然 Kubefed 也不是银弹,也有其一定的局限性。从前面可以看到,其 API 定义复杂,容易出错,也只能使用 kubefedctl 加入和解绑集群,没有提供单独的 SDK。再就是它要求控制层集群到管控集群必须网络可达,单集群到多集群需要改造 API,旧版本也不支持联邦资源的状态收集。 -## KubeShere On Kubefed +## KubeSphere On Kubefed 接下来我们看看 KubeSphere 基于 Kubefed 如何实现并简化了多集群管理。 @@ -135,4 +135,3 @@ Virtual Kubelet 可以帮助你把自己的服务伪装成一个 Kubernetes 的 在 Liqo 里面,集群之间不存在联邦关系,左图里在 Kubefed 架构下 k2、k3 两个集群是 k1 的成员集群,资源下方需要经过一次 k1 的 push,而在右边的图里面,k2、k3 只是 k1 的一个节点,因此在部署应用的时候,完全不需要引入任何的 API,k2、k3 看起来就是 k1 的节点,这样业务就可以无感知的被部署到不同的集群上去,极大减少了单集群到多集群改造的复杂性。现在 Liqo 属于刚起步阶段,目前不支持两个集群以上的拓扑,在未来 KubeSphere 也会持续关注开源领域的一些其他的多集群管理方案。 - diff --git a/content/zh/blogs/Serverless-way-for-Kubernetes-Log-Alerting.md b/content/zh/blogs/Serverless-way-for-Kubernetes-Log-Alerting.md deleted file mode 100644 index 64fb25e96..000000000 --- a/content/zh/blogs/Serverless-way-for-Kubernetes-Log-Alerting.md +++ /dev/null @@ -1,426 +0,0 @@ ---- -title: 'OpenFunction 应用系列之一: 以 Serverless 的方式实现 Kubernetes 日志告警' -tag: 'OpenFunction, KubeSphere, Kubernetes' -keywords: 'penFunction, Serverless, KubeSphere, Kubernetes, Kafka, FaaS, 无服务器' -description: '本文提供了一种基于 Serverless 
的日志处理思路,可以在降低该任务链路成本的同时提高其灵活性。' -createTime: '2021-08-26' -author: '方阗' -snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202109031518797.png' ---- -## 概述 - -当我们将容器的日志收集到消息服务器之后,我们该如何处理这些日志?部署一个专用的日志处理工作负载可能会耗费多余的成本,而当日志体量骤增、骤降时亦难以评估日志处理工作负载的待机数量。本文提供了一种基于 Serverless 的日志处理思路,可以在降低该任务链路成本的同时提高其灵活性。 - -我们的大体设计是使用 Kafka 服务器作为日志的接收器,之后以输入 Kafka 服务器的日志作为事件,驱动 Serverless 工作负载对日志进行处理。据此的大致步骤为: - -1. 搭建 Kafka 服务器作为 Kubernetes 集群的日志接收器 -2. 部署 OpenFunction 为日志处理工作负载提供 Serverless 能力 -3. 编写日志处理函数,抓取特定的日志生成告警消息 -4. 配置 [Notification Manager](https://github.com/kubesphere/notification-manager/) 将告警发送至 Slack - -![](https://pek3b.qingstor.com/kubesphere-community/images/202108261124546.png) - -在这个场景中,我们会利用到 [OpenFunction](https://github.com/OpenFunction/OpenFunction) 带来的 Serverless 能力。 - -> [OpenFunction](https://github.com/OpenFunction/OpenFunction) 是 KubeSphere 社区开源的一个 FaaS(Serverless)项目,旨在让用户专注于他们的业务逻辑,而不必关心底层运行环境和基础设施。该项目当前具备以下关键能力: -> -> - 支持通过 dockerfile 或 buildpacks 方式构建 OCI 镜像 -> - 支持使用 Knative Serving 或 OpenFunctionAsync ( KEDA + Dapr ) 作为 runtime 运行 Serverless 工作负载 -> - 自带事件驱动框架 - -## 使用 Kafka 作为日志接收器 - -首先,我们为 KubeSphere 平台开启 **logging** 组件(可以参考 [启用可插拔组件](https://kubesphere.io/zh/docs/pluggable-components/) 获取更多信息)。然后我们使用 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) 搭建一个最小化的 Kafka 服务器。 - -1. 在 default 命名空间中安装 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) : - - ```shell - helm repo add strimzi https://strimzi.io/charts/ - helm install kafka-operator -n default strimzi/strimzi-kafka-operator - ``` - -2. 运行以下命令在 default 命名空间中创建 Kafka 集群和 Kafka Topic,该命令所创建的 Kafka 和 Zookeeper 集群的存储类型为 **ephemeral**,使用 emptyDir 进行演示。 - - > 注意,我们此时创建了一个名为 “logs” 的 topic,后续会用到它 - - ```shell - cat < 如果您启用了[多集群功能](https://kubesphere.io/zh/docs/multicluster-management/),您可以选择一个集群。 - -2. 在**集群管理**页面,选择**集群设置**下的**日志收集**。 - -3. 
点击**添加日志接收器**并选择 **Kafka**。输入 Kafka 代理地址和端口信息,然后点击**确定**继续。 - -![](https://i.imgur.com/RcIcQ3a.png) - - -4. 运行以下命令验证 Kafka 集群是否能从 Fluent Bit 接收日志: - - ```shell - # 启动一个工具 pod - $ kubectl run utils --image=arunvelsriram/utils -i --tty --rm - # 检查 logs topic 中的日志情况 - $ kafkacat -C -b kafka-logs-receiver-kafka-0.kafka-logs-receiver-kafka-brokers.default.svc:9092 -t logs - ``` - -## 部署 OpenFunction - -按照概述中的设计,我们需要先部署 OpenFunction。OpenFunction 项目引用了很多第三方的项目,如 Knative、Tekton、ShipWright、Dapr、KEDA 等,手动安装较为繁琐,推荐使用 [Prerequisites 文档](https://github.com/OpenFunction/OpenFunction#prerequisites) 中的方法,一键部署 OpenFunction 的依赖组件。 - -> 其中 `--with-shipwright` 表示部署 shipwright 作为函数的构建驱动 -> `--with-openFuncAsync` 表示部署 OpenFuncAsync Runtime 作为函数的负载驱动 -> 而当你的网络在访问 Github 及 Google 受限时,可以加上 `--poor-network` 参数用于下载相关的组件 -```shell -sh hack/deploy.sh --with-shipwright --with-openFuncAsync --poor-network -``` - -部署 OpenFunction: - -> 此处选择安装最新的稳定版本,你也可以使用开发版本,参考 [Install 文档](https://github.com/OpenFunction/OpenFunction#install) -> -> 为了可以正常使用 ShipWright ,我们提供了默认的构建策略,可以使用以下命令设置该策略: -> -> ```shell -> kubectl apply -f https://raw.githubusercontent.com/OpenFunction/OpenFunction/main/config/strategy/openfunction.yaml -> ``` -```shell -kubectl apply -f https://github.com/OpenFunction/OpenFunction/releases/download/v0.3.0/bundle.yaml -``` - -## 编写日志处理函数 - -我们以 [创建并部署 WordPress](https://kubesphere.io/zh/docs/quick-start/wordpress-deployment/) 为例,搭建一个 WordPress 应用作为日志的生产者。该应用的工作负载所在的命名空间为 “demo-project”,Pod 名称为 “wordpress-v1-f54f697c5-hdn2z”。 - -当请求结果为 404 时,我们收到的日志内容如下: -```json -{"@timestamp":1629856477.226758,"log":"*.*.*.* - - [25/Aug/2021:01:54:36 +0000] \"GET /notfound HTTP/1.1\" 404 49923 \"-\" \"curl/7.58.0\"\n","time":"2021-08-25T01:54:37.226757612Z","kubernetes":{"pod_name":"wordpress-v1-f54f697c5-hdn2z","namespace_name":"demo-project","container_name":"container-nrdsp1","docker_id":"bb7b48e2883be0c05b22c04b1d1573729dd06223ae0b1676e33a4fac655958a5","container_image":"wordpress:4.8-apache"}} - 
-``` - -我们的需求是:当一个请求结果为 404 时,发送一个告警通知给接收器(可以根据 [配置 Slack 通知](https://kubesphere.io/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack/) 配置一个 Slack 告警接收器),并记录命名空间、Pod 名称、请求路径、请求方法等信息。按照这个需求,我们编写一个简单的处理函数: -> 你可以从 [OpenFunction Context Spec](https://github.com/OpenFunction/functions-framework/blob/main/docs/OpenFunction-context-specs.md) 处了解 **openfunction-context** 的使用方法,这是 OpenFunction 提供给用户编写函数的工具库 -> 你可以通过 [OpenFunction Samples](https://github.com/OpenFunction/samples) 了解更多的 OpenFunction 函数案例 - -```go -package logshandler - -import ( - "encoding/json" - "fmt" - "log" - "regexp" - "time" - - ofctx "github.com/OpenFunction/functions-framework-go/openfunction-context" - alert "github.com/prometheus/alertmanager/template" -) - -const ( - HTTPCodeNotFound = "404" - Namespace = "demo-project" - PodName = "wordpress-v1-[A-Za-z0-9]{9}-[A-Za-z0-9]{5}" - AlertName = "404 Request" - Severity = "warning" -) - -// LogsHandler ctx 参数提供了用户函数在集群语境中的上下文句柄,如 ctx.SendTo 用于将数据发送至指定的目的地 -// LogsHandler in 参数用于将输入源中的数据(如有)以 bytes 的方式传递给函数 -func LogsHandler(ctx *ofctx.OpenFunctionContext, in []byte) int { - content := string(in) - // 这里我们设置了三个正则表达式,分别用于匹配 HTTP 返回码、资源命名空间、资源 Pod 名称 - matchHTTPCode, _ := regexp.MatchString(fmt.Sprintf(" %s ", HTTPCodeNotFound), content) - matchNamespace, _ := regexp.MatchString(fmt.Sprintf("namespace_name\":\"%s", Namespace), content) - matchPodName := regexp.MustCompile(fmt.Sprintf(`(%s)`, PodName)).FindStringSubmatch(content) - - if matchHTTPCode && matchNamespace && matchPodName != nil { - log.Printf("Match log - Content: %s", content) - - // 如果上述三个正则表达式同时命中,那么我们需要提取日志内容中的一些信息,用于填充至告警信息中 - // 这些信息为:404 请求的请求方式(HTTP Method)、请求路径(HTTP Path)以及 Pod 名称 - match := regexp.MustCompile(`([A-Z]+) (/\S*) HTTP`).FindStringSubmatch(content) - if match == nil { - return 500 - } - path := match[len(match)-1] - method := match[len(match)-2] - podName := matchPodName[len(matchPodName)-1] - - // 收集到关键信息后,我们使用 altermanager 的 Data 
结构体组装告警信息 - notify := &alert.Data{ - Receiver: "notification_manager", - Status: "firing", - Alerts: alert.Alerts{}, - GroupLabels: alert.KV{"alertname": AlertName, "namespace": Namespace}, - CommonLabels: alert.KV{"alertname": AlertName, "namespace": Namespace, "severity": Severity}, - CommonAnnotations: alert.KV{}, - ExternalURL: "", - } - alt := alert.Alert{ - Status: "firing", - Labels: alert.KV{ - "alertname": AlertName, - "namespace": Namespace, - "severity": Severity, - "pod": podName, - "path": path, - "method": method, - }, - Annotations: alert.KV{}, - StartsAt: time.Now(), - EndsAt: time.Time{}, - GeneratorURL: "", - Fingerprint: "", - } - notify.Alerts = append(notify.Alerts, alt) - notifyBytes, _ := json.Marshal(notify) - - // 使用 ctx.SendTo 将内容发送给名为 "notification-manager" 的输出端(你可以在之后的函数配置 logs-handler-function.yaml 中找到它的定义) - if err := ctx.SendTo(notifyBytes, "notification-manager"); err != nil { - panic(err) - } - log.Printf("Send log to notification manager.") - } - return 200 -} - -``` - -我们将这个函数上传到代码仓库中,记录**代码仓库的地址**以及**代码在仓库中的目录路径**,在下面的创建函数步骤中我们将使用到这两个值。 -> 你可以在 [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/OpenFuncAsync/logs-handler-function) 中找到这个案例。 - -## 创建函数 - -接下来我们将使用 OpenFunction 构建上述的函数。首先设置一个用于访问镜像仓库的秘钥文件 **push-secret**(在使用代码构建出 OCI 镜像后,OpenFunction 会将该镜像上传到用户的镜像仓库中,用于后续的负载启动): - -```shell -REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= -kubectl create secret docker-registry push-secret \ - --docker-server=$REGISTRY_SERVER \ - --docker-username=$REGISTRY_USER \ - --docker-password=$REGISTRY_PASSWORD -``` - -应用函数 **logs-handler-function.yaml**: - -> 函数定义中包含了对两个关键组件的使用: -> -> [Dapr](https://dapr.io/) 对应用程序屏蔽了复杂的中间件,使得 logs-handler 可以非常容易地处理 Kafka 中的事件 -> -> [KEDA](https://keda.sh/) 通过监控消息服务器中的事件流量来驱动 logs-handler 函数的启动,并且根据 Kafka 中消息的消费延时动态扩展 logs-handler 实例 - -```yaml -apiVersion: core.openfunction.io/v1alpha1 -kind: Function -metadata: - name: logs-handler -spec: - 
version: "v1.0.0" - # 这里定义了构建后的镜像的上传路径 - image: openfunctiondev/logs-async-handler:v1 - imageCredentials: - name: push-secret - build: - builder: openfunctiondev/go115-builder:v0.2.0 - env: - FUNC_NAME: "LogsHandler" - # 这里定义了源代码的路径 - # url 为上面提到的代码仓库地址 - # sourceSubPath 为代码在仓库中的目录路径 - srcRepo: - url: "https://github.com/OpenFunction/samples.git" - sourceSubPath: "functions/OpenFuncAsync/logs-handler-function/" - serving: - # OpenFuncAsync 是 OpenFunction 通过 KEDA+Dapr 实现的一种由事件驱动的异步函数运行时 - runtime: "OpenFuncAsync" - openFuncAsync: - # 此处定义了函数的输入(kafka-receiver)和输出(notification-manager),与下面 components 中的定义对应关联 - dapr: - inputs: - - name: kafka-receiver - type: bindings - outputs: - - name: notification-manager - type: bindings - params: - operation: "post" - type: "bindings" - annotations: - dapr.io/log-level: "debug" - # 这里完成了上述输入端和输出端的具体定义(即 Dapr Components) - components: - - name: kafka-receiver - type: bindings.kafka - version: v1 - metadata: - - name: brokers - value: "kafka-logs-receiver-kafka-brokers:9092" - - name: authRequired - value: "false" - - name: publishTopic - value: "logs" - - name: topics - value: "logs" - - name: consumerGroup - value: "logs-handler" - # 此处为 KubeSphere 的 notification-manager 地址 - - name: notification-manager - type: bindings.http - version: v1 - metadata: - - name: url - value: http://notification-manager-svc.kubesphere-monitoring-system.svc.cluster.local:19093/api/v2/alerts - keda: - scaledObject: - pollingInterval: 15 - minReplicaCount: 0 - maxReplicaCount: 10 - cooldownPeriod: 30 - # 这里定义了函数的触发器,即 Kafka 服务器的 “logs” topic - # 同时定义了消息堆积阈值(此处为 10),即当消息堆积量超过 10,logs-handler 实例个数就会自动扩展 - triggers: - - type: kafka - metadata: - topic: logs - bootstrapServers: kafka-logs-receiver-kafka-brokers.default.svc.cluster.local:9092 - consumerGroup: logs-handler - lagThreshold: "10" -``` - -## 结果演示 - -我们先关闭 Kafka 日志接收器:在**日志收集**页面,点击进入 Kafka 日志接收器详情页面,然后点击**更多操作**并选择**更改状态**,将其设置为**关闭**。 - -停用后一段时间,我们可以观察到 logs-handler 函数实例已经收缩到 0 了。 - -再将 
Kafka 日志接收器**激活**,logs-handler 随之启动。 - -```shell -~# kubectl get po --watch -NAME READY STATUS RESTARTS AGE -kafka-logs-receiver-entity-operator-568957ff84-tdrrx 3/3 Running 0 7m27s -kafka-logs-receiver-kafka-0 1/1 Running 0 7m48s -kafka-logs-receiver-zookeeper-0 1/1 Running 0 8m12s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 34s -strimzi-cluster-operator-687fdd6f77-kc8cv 1/1 Running 0 10m -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 36s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 37s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 0s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 2s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 1/2 Running 0 4s -logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 2/2 Running 0 11s -``` - -接着我们向 WordPress 应用一个不存在的路径发起请求: - -```shell -curl http:///notfound -``` - -可以看到 Slack 中已经收到了这条消息(与之对比的是,当我们正常访问该 WordPress 站点时, Slack 中并不会收到告警消息): - -![](https://i.imgur.com/YQc5uOq.png) - -### 进一步探索 - -同步函数的解决方案: - -为了可以正常使用 Knative Serving ,我们需要设置其网关的负载均衡器地址。(你可以使用本机地址作为 workaround) - -```bash -# 将下面的 "1.2.3.4" 替换为实际场景中的地址。 -$ kubectl patch svc -n kourier-system kourier \ --p '{"spec": {"type": "LoadBalancer", "externalIPs": ["1.2.3.4"]}}' - -$ kubectl patch configmap/config-domain -n knative-serving \ --type merge --patch '{"data":{"1.2.3.4.sslip.io":""}}' -``` - -除了直接由 Kafka 服务器驱动函数运作(异步方式),OpenFunction 还支持使用自带的事件框架对接 Kafka 服务器,之后以 Sink 的方式驱动 Knative 函数运作。可以参考 [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/Knative/logs-handler-function) 中的案例。 - 
-在该方案中,同步函数的处理速度较之异步函数有所降低,当然我们同样可以借助 KEDA 来触发 Knative Serving 的 concurrency 机制,但总体而言缺乏异步函数的便捷性。(后续的阶段中我们会优化 OpenFunction 的事件框架来解决同步函数这方面的缺陷) - -由此可见,不同类型的 Serverless 函数有其擅长的任务场景,如一个有序的控制流函数就需要由同步函数而非异步函数来处理。 - -## 综述 - -Serverless 带来了我们所期望的对业务场景快速拆解重构的能力。 - -如本案例所示,OpenFunction 不但以 Serverless 的方式提升了日志处理、告警通知链路的灵活度,还通过函数框架将通常对接 Kafka 时复杂的配置步骤简化为语义明确的代码逻辑。同时,我们也在不断演进 OpenFunction,将在之后版本中实现由自身的 Serverless 能力驱动自身的组件运作。 \ No newline at end of file diff --git a/content/zh/blogs/WeDataSphere-KubeSphere.md b/content/zh/blogs/WeDataSphere-KubeSphere.md index 98b2d767c..8b746718b 100644 --- a/content/zh/blogs/WeDataSphere-KubeSphere.md +++ b/content/zh/blogs/WeDataSphere-KubeSphere.md @@ -169,4 +169,4 @@ WeDataSphere 是我们大数据平台实现的一整套金融级的一站式机 再展望一下,我们 WeDataSpehre 跟 KubeSphere 的未来,目前我们两个社区已经官宣开源合作。 -我们计划把我们 WeDataSphere 大数据平台这些组件全部能容器化,然后贡献到 KubeSpehre 应用商店中,帮助我们用户去快速和高效的完成我们这些组件与应用的生命周期管理、发布。 +我们计划把我们 WeDataSphere 大数据平台这些组件全部能容器化,然后贡献到 KubeSphere 应用商店中,帮助我们用户去快速和高效的完成我们这些组件与应用的生命周期管理、发布。 diff --git a/content/zh/blogs/apache-log4j2-vulnerability-solution.md b/content/zh/blogs/apache-log4j2-vulnerability-solution.md new file mode 100644 index 000000000..f5559b38f --- /dev/null +++ b/content/zh/blogs/apache-log4j2-vulnerability-solution.md @@ -0,0 +1,81 @@ +--- +title: 'KubeSphere 对 Apache Log4j 2 远程代码执行最新漏洞的修复方案' +tag: 'CVE 漏洞' +keywords: 'Elasticsearch, Apache Log4j, 安全漏洞, KubeSphere' +description: 'Apache Log4j 2 是一款开源的日志记录工具,被广泛应用于各类框架中。近期,Apache Log4j 2 被爆出存在漏洞,漏洞现已公开,本文为 KubeSphere 用户提供建议的修复方案。' +createTime: '2021-12-11' +author: 'KubeSphere Team' +snapshot: '../../../images/blogs/log4j/log4j.jpeg' +--- + +Apache Log4j 2 是一款开源的日志记录工具,被广泛应用于各类框架中。近期,Apache Log4j 2 被爆出存在漏洞,漏洞现已公开,本文为 KubeSphere 用户提供建议的修复方案。 + +此次漏洞是由于 Log4j 2 提供的 lookup 功能造成的,该功能允许开发者通过一些协议去读取相应环境中的配置。但在实现的过程中,并未对输入进行严格的判断,从而造成漏洞的发生。由于大量的软件都使用了 Log4j 2 插件,所以大量的 Java 类产品均被波及,包括但不限于 Apache Solr、srping-boot-strater-log4j2、Apache Struts2、ElasticSearch、Dubbo、Redis、Logstash、Kafka...更多组件可以参考 [Log4j 2 
相关文档](https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/usages?p=1)。 + +受影响的 Log4j 版本为 Apache Log4j 2.x < 2.15.0-rc2。目前官方发布了 Apache 2.15.0-rc2 版本对该漏洞进行了修复,但是该版本并非正式发行版,故存在不稳定的因素,如要升级建议对相关数据进行备份。 + +同时,也提供了三种方法对漏洞进行补救,为 + +- 将系统环境变量 `FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS` 设置为 `true` +- 修改配置 `log4j2.formatMsgNoLookups=True` +- 修改 JVM 参数 `-Dlog4j2.formatMsgNoLookups=true` + +以下三种解决方法,您可以任选其中一种进行参考。 + +## 方法一:修改系统环境变量 + +由于 KubeSphere 默认使用了 ElasticSearch 收集日志,所以也应该在 KubeSphere 修改相应的配置来对漏洞进行修复。以下说明如何在 KubeSphere 中进行相应的操作对 ElasticSearch 进行修复。 + +将系统环境变量 `FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS` 设置为 True,为此,我们需要修改 ElasticSearch 的 Yaml 文件,因为它是一个 StatefulSet 文件,所以需要进行如下修改: + +```yaml +kubectl edit statefulset elasticsearch-logging-data -n kubesphere-logging-system +kubectl edit statefulset elasticsearch-logging-discovery -n kubesphere-logging-system +``` + +在这两个 Yaml 文件中插入环境变量设置: + +```yaml +env: +- name: FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS + value: "true" +``` + +## 方法二:修改 Log4j 2 配置 + +另外,您也可以修改配置 `log4j2.formatMsgNoLookups=True`,您可以执行如下命令: + +```yaml +kubectl edit configmaps elasticsearch-logging -n kubesphere-logging-system +``` + +然后插入上面所提到的配置: + +```yaml +log4j2.properties: |- + status=error + appender.console.type=Console + appender.console.name=console + appender.console.layout.type=PatternLayout + appender.console.layout.pattern=[%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + rootLogger.level=info + rootLogger.appenderRef.console.ref=console + logger.searchguard.name=com.floragunn + logger.searchguard.level=info + # 插入此行 + log4j2.formatMsgNoLookups=true +``` + +> 注意: +> 1. 修改后请注意相关配置是否挂载进去,如果没有挂载进去,请重启 Pod。 +> 2. 
如果您将 KubeSphere Logging 组件重新安装,ks-installer 可能会导致该 ConfigMap 的配置被重置,需要再参考方法二手动配置一遍,或者采取方法一,设置系统环境变量 `FORMAT_MESSAGES_PATTERN_DISABLE_LOOKUPS 为 true`。 + +## 方法三:修改 ElasticSearch 的 JVM 参数 + +除了上述两种方法,您还可以选择在 KubeSphere 集群中的 ElasticSearch 添加配置文件,单独配置 JVM 参数,详见 [ElasticSearch 公告声明](https://discuss.elastic.co/t/apache-log4j2-remote-code-execution-rce-vulnerability-cve-2021-44228-esa-2021-31/291476)。 + +## 相关参考 + +- 关于 Apache Log4j 2 远程代码执行最新漏洞的风险提示: https://itsc.nju.edu.cn/7a/42/c41947a555586/page.htm +- Artifacts using Apache Log4j Core:https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/usages?p=1 + diff --git a/content/zh/blogs/aws-kubernetes.md b/content/zh/blogs/aws-kubernetes.md index c0e75ca2e..e0bb9e8f0 100644 --- a/content/zh/blogs/aws-kubernetes.md +++ b/content/zh/blogs/aws-kubernetes.md @@ -1,5 +1,5 @@ --- -title: 'KubeKey 在 AWS 高可用部署 Kubernetes' +title: 'KubeKey 在 AWS 安装部署 Kubernetes 高可用集群' tag: 'Kubernetes,AWS,亚马逊' keywords: 'Kubernetes, AWS, KubeKey, 高可用, 亚马逊' description: 'KubeKey 是一款可以快速、便捷部署高可用 Kubernetes 集群的工具。本文将主要介绍如何在亚马逊 AWS 部署高可用的 Kubernetes 集群。' @@ -136,7 +136,7 @@ sudo systemctl restart sshd 从[Github Realese Page](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令: ``` -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.0 sh - ``` ### 使用 KubeyKey 部署 diff --git a/content/zh/blogs/calico-ebpf.md b/content/zh/blogs/calico-ebpf.md new file mode 100644 index 000000000..2f69393a4 --- /dev/null +++ b/content/zh/blogs/calico-ebpf.md @@ -0,0 +1,162 @@ +--- +title: '开启 Calico eBPF 数据平面实践' +tag: 'Kubernetes,Calico,eBPF' +keywords: 'Kubernetes, Calico, eBPF' +description: '在本篇文章中,我们将首先演示通过 KubeKey 创建一个标准的 K8s 集群,并切换数据平面到 eBPF,最后基于该数据平面做一个简单的演示。' +createTime: '2021-06-24' +author: '饶云坤' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/calico-ebpf-cover.png' +--- + +## 简介 + +Calico 从 v3.13 开始,集成了 `eBPF` 数据平面。 + +关于什么是 `eBPF`, 以及 `Calico` 
为什么引入了 `eBPF` , 并不是本篇文章的重点,感兴趣的朋友可以自行阅读[相关文档](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/)。 + +相比于 Calico 的默认基于 `iptables` 数据平面,`eBPF` 具有更高的吞吐量以外, 还具有 `source IP preservation` 这个功能。 + +在 K8s 中通常都是直接或者间接以 `NodePort` 方式对外暴露接口的。而对于 K8s 这个分布式集群来讲,通常情况下,客户端连接 Node Port 端口的节点和负责响应请求的后端业务 Pod 所在的节点不是同一个节点,为了打通整个数据链路,就不可避免的引入了 `SNAT`。但是这样显然也会带来一个副作用,即业务 Pod 在收到 Packet 以后,其 SRC IP 已经不再是客户端的实际 IP(被伪装成节点的内网 IP )。另一方面,对于一些业务应用来讲,获取客户端 IP 是一个实实在在的刚需。比如:业务应用需要通过客户端 IP 来获取客户登陆的 geo 信息。 + +目前 K8s 主要是通过设置 `externaltrafficpolicy` 来规避这个问题的,但是这个方案本身并不能完全令人满意。Calico 从 v3.13 开始通过集成 `eBPF` 优雅地解决了这个问题。 + +在本篇文章中,我们将首先演示通过 [KubeKey](https://github.com/kubesphere/kubekey) 创建一个标准的 K8s 集群,并切换数据平面到 `eBPF`,最后基于该数据平面做一个简单的演示。 + +## 前提条件 + +较新的内核, 一般 v4.18+ 即可。 + +笔者的测试集群: + +![](https://pek3b.qingstor.com/kubesphere-community/images/1614074234-986729-image.png) + +## 部署 K8s 集群 + +Kubekey 默认的 CNI 插件为 Calico(ipip模式)。这里为了部署方便,直接使用 KubeKey 部署了一个全新的 K8s 集群,版本为 v1.18.6 。KubeKey 的详细用法参见[文档](https://github.com/kubesphere/kubekey/blob/master/README_zh-CN.md)。 + + +## 切换 Calico 数据平面 + +Calico 支持多种数据平面,通过修改配置可以方便地进行切换,详细信息可以参见[官方文档](https://docs.projectcalico.org/maintenance/enabling-bpf)。 + +主要分为以下几步: + +1. 确认 BPF 文件系统已经挂载: + +```shell +mount | grep "/sys/fs/bpf" +``` +如果能看到以下信息,则代表 BPF 文件系统已经挂载: + +![](https://pek3b.qingstor.com/kubesphere-community/images/1614074281-217849-image.png) + + +2. 
创建 Calico 配置文件: + +- 首先获取 ks-apiserver endpoints 信息: + +```shell +kubectl get endpoints kubernetes -o wide +``` + +- 由于 KubeKey 是通过 manifest 方式安装的 Calico,这里我们只需要创建一个 cm 即可: + +```yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubernetes-services-endpoint + namespace: kube-system +data: + KUBERNETES_SERVICE_HOST: "" + KUBERNETES_SERVICE_PORT: "" +``` + +- 重启 Calico pods,并等待 Calico Pod 重新变为 Running 状态 + +```shell +kubectl delete pod -n kube-system -l k8s-app=calico-node +kubectl delete pod -n kube-system -l k8s-app=calico-kube-controllers +``` + +- 关闭 kube-proxy + +```shell +kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}' +``` + +- 开启 eBPF 模式 + +```shell +calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": true}}' +``` + +- 由于我们需要保留客户端 IP,所以需要开启 `DSR` 模式。 + +```shell +calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}' +``` + +至此,Calico 的整个网络环境已经配置完毕。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1614074348-806671-image.png) + +## 体验 + +为了验证 Calico 切换到 `eBPF` 数据平面以后,后端确实可以拿到客户端的真实 IP ,下面我们会在集群中部署一个 Nginx 服务,并通过 nodeport 方式暴露接口。 + +创建 Nginx 实例并暴露外部接口: + +```shell +master:~$ kubectl apply -f - <:30604 +``` + +查询 Nginx 日志,查看 client IP: + +![](https://pek3b.qingstor.com/kubesphere-community/images/1614074617-657518-image.png) + +> 注意:如果集群本身部署在云平台环境中,如果节点位于 VPC 网络当中,需要设置相应的端口转发规则,并开启相应的防火墙端口。 diff --git a/content/zh/blogs/calico-guide.md b/content/zh/blogs/calico-guide.md new file mode 100644 index 000000000..5619ed9f3 --- /dev/null +++ b/content/zh/blogs/calico-guide.md @@ -0,0 +1,600 @@ +--- +title: 'Calico 路由反射模式权威指南' +tag: 'Kubernetes,KubeSphere,Calico' +keywords: 'Kubernetes, KubeSphere, Calico, 网络插件, CNI' +description: '作为 Kubernetes 最长使用的一种网络插件,Calico 具有很强的扩展性,较优的资源利用和较少的依赖,相较于 Flannel 插件采用 Overlay 的网络,Calico 可以通过三层路由的方式采用性能更佳的 Underlay 网络,Calico 网络插件的转发效率是所有方案中较高的。' +createTime: '2021-06-02' +author: 
'武献雨' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/calico-cover.png' +--- + +## 概述 + +作为 Kubernetes 最常使用的一种网络插件,Calico 具有很强的扩展性,较优的资源利用和较少的依赖,相较于 Flannel 插件采用 Overlay 的网络,Calico 可以通过三层路由的方式采用性能更佳的 Underlay 网络,Calico 网络插件的转发效率是所有方案中较高的。 + +在使用 Calico 网络插件的实际生产环境当中,为了提高网络的性能和灵活性,需要将 K8s 的工作节点和物理网络中的 leaf 交换机建立 BGP 邻居关系,同步 BGP 路由信息,可以将 pod 网络的路由发布到物理网络中。Calico 给出了三种类型的 BGP 互联方案,分别是 **Full-mesh**、**Route reflectors** 和 **Top of Rack (ToR)**。 + +### Full-mesh + +全互联模式,启用了 BGP 之后,Calico 的默认行为是在每个节点彼此对等的情况下创建完整的内部 BGP(iBGP)连接,这使 Calico 可以在任何 L2 网络(无论是公有云还是私有云)上运行,或者说(如果配了 IPIP)可以在任何不禁止 IPIP 流量的网络上作为 Overlay 运行。对于 vxlan overlay,Calico 不使用 BGP。 + +Full-mesh 模式对于 100 个以内的工作节点或更少节点的中小规模部署非常有用,但是在较大的规模上,Full-mesh 模式效率会降低,较大规模情况下,Calico 官方建议使用 Route reflectors。 + +### Route reflectors + +如果想构建内部 BGP(iBGP)大规模集群,可以使用 BGP 路由反射器来减少每个节点上使用 BGP 对等体的数量。在此模型中,某些节点充当路由反射器,并配置为在它们之间建立完整的网格。然后,将其他节点配置为与这些路由反射器的子集(通常为冗余,通常为 2 个)进行对等,从而与全网格相比减少了 BGP 对等连接的总数。 + +### Top of Rack(ToR) + +在本地部署中,可以将 Calico 配置为直接与物理网络基础结构对等。通常,这需要涉及到禁用 Calico 的默认 Full-mesh 行为,将所有 Calico 节点与 L3 ToR 路由器对等。 + +本篇文章重点会介绍如何在 BGP 网络环境下配置 Calico 路由反射器,本篇主要介绍将 K8S 工作节点作为路由反射器和物理交换机建立 BGP 连接。配置环境拓扑如下: + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210602104325.png) + +在本次环境中,分别有一台 spine 交换机和两台 leaf 交换机来建立 EBGP 连接。所有 leaf 交换机都属于一个独立的自治系统,所有 leaf 交换机下的 node 都属于一个独立的自治系统。Kubernetes 集群节点中每个 leaf 下由两台工作节点作为 CalicoRR(路由反射器),之所以用两台 node 作为路由反射器是考虑冗余性,所有 Calico RR 都跟自己上联的 leaf 交换机建立 EBGP 连接。Calico RR 和自己所属的 node 之间建立 iBGP 连接。 + +## 安装 calicoctl + +Calico RR 所有配置操作都需要通过 calicoctl 工具来完成, calicoctl 允许从命令行创建,读取,更新和删除 Calico 对象,所以我们首先需要在 Kubernetes 所有的工作节点上安装 calicoctl 工具。 + +采用二进制方式安装 calicoctl 工具。 + +登录到主机,打开终端提示符,然后导航到安装二进制文件位置,一般情况下 calicoctl 安装到 /usr/local/bin/。 + +使用以下命令下载 calicoctl 二进制文件,版本号选择自己 Calico 的版本。 + +``` +$ curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v3.17.2/calicoctl +``` + +将文件设置为可执行文件。 + +``` +$ chmod +x calicoctl +``` + +每次执行 calicoctl 之前需要设置环境变量。 
+ +``` +$ export DATASTORE_TYPE=kubernetes +$ export KUBECONFIG=~/.kube/config +``` + +如果不希望每次执行 calicoctl 之前都需要设置环境变量,可以将环境变量信息写到永久写入到 /etc/calico/calicoctl.cfg 文件里,calicoctl.cfg 配置文件编辑如下: + +```yaml +apiVersion: projectcalico.org/v3 +kind: CalicoAPIConfig +metadata: +spec: + datastoreType: "kubernetes" + kubeconfig: "/root/.kube/config" +``` + +命令使用: + +``` +[root@node1 ~]# calicoctl -h +Usage: + calicoctl [options] [...] + + create Create a resource by filename or stdin. + replace Replace a resource by filename or stdin. + apply Apply a resource by filename or stdin. This creates a resource + if it does not exist, and replaces a resource if it does exists. + patch Patch a pre-exisiting resource in place. + delete Delete a resource identified by file, stdin or resource type and + name. + get Get a resource identified by file, stdin or resource type and + name. + label Add or update labels of resources. + convert Convert config files between different API versions. + ipam IP address management. + node Calico node management. + version Display the version of calicoctl. + export Export the Calico datastore objects for migration + import Import the Calico datastore objects for migration + datastore Calico datastore management. + +Options: + -h --help Show this screen. + -l --log-level= Set the log level (one of panic, fatal, error, + warn, info, debug) [default: panic] + +Description: + The calicoctl command line tool is used to manage Calico network and security + policy, to view and manage endpoint configuration, and to manage a Calico + node instance. + + See 'calicoctl --help' to read about a specific subcommand. 
+``` + +## 关闭 Full-mesh 模式 + +Calico 默认是 Full-mesh 全互联模式,Calico 集群中的的节点之间都会建立连接,进行路由交换。但是随着集群规模的扩大,mesh 模式将形成一个巨大服务网格,连接数成倍增加。这时就需要使用 Route Reflector(路由器反射)模式解决这个问题。确定一个或多个 Calico 节点充当路由反射器,让其他节点从这个 RR 节点获取路由信息。 + +关闭 node-to-node BGP 网络,具体操作步骤如下: + +添加 default BGP 配置,调整 nodeToNodeMeshEnabled 和 asNumber: + +``` +[root@node1 calico]# cat bgpconf.yaml +apiVersion: projectcalico.org/v3 +kind: BGPConfiguration +metadata: + name: default +spec: + logSeverityScreen: Info + nodeToNodeMeshEnabled: false + asNumber: 64512 +``` + +直接应用一下,应用之后会马上禁用 Full-mesh: + +``` +[root@node1 calico]# calicoctl apply -f bgpconf.yaml +Successfully applied 1 'BGPConfiguration' resource(s) +``` + +查看 bgp 网络配置情况,false 为关闭: + +``` +[root@node1 calico]# calicoctl get bgpconfig +NAME LOGSEVERITY MESHENABLED ASNUMBER +default Info false 64512 +``` + +## 修改工作节点的 Calico 配置 + +通过 calicoctl get nodes --output=wide 可以获取各节点的 ASN 号: + +``` +[root@node1 calico]# calicoctl get nodes --output=wide +NAME ASN IPV4 IPV6 +node1 (64512) 172.20.0.11/24 +node2 (64512) 172.20.0.12/24 +node3 (64512) 172.20.0.13/24 +node4 (64512) 173.20.0.11/24 +node5 (64512) 173.20.0.12/24 +node6 (64512) 173.20.0.13/24 +``` + +可以看到获取的 ASN 号都是“(64512)”,这是因为如果不给每个节点指定 ASN 号,默认都是 64512。我们可以按照拓扑图配置各个节点的 ASN 号,不同 leaf 交换机下的节点,ASN 号不一样,每个 leaf 交换机下的工作节点都是一个独立自治系统。 + +通过如下命令,获取工作节点的 Calico 配置信息: + +``` +$ calicoctl get node node1 -o yaml > node1.yaml +``` + +每一个工作节点的 Calico 配置信息都需要获取一下,输出为 yaml 文件,“node1”为 Calico 节点的名称。 + +按照如下格式进行修改: + +``` +[root@node1 calico]# cat node1.yaml +apiVersion: projectcalico.org/v3 +kind: Node +metadata: + annotations: + projectcalico.org/kube-labels: '{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"node1","kubernetes.io/os":"linux","node-role.kubernetes.io/master":"","node-role.kubernetes.io/worker":"","rr-group":"rr1","rr-id":"rr1"}' + creationTimestamp: null + labels: + beta.kubernetes.io/arch: amd64 + beta.kubernetes.io/os: linux + 
kubernetes.io/arch: amd64 + kubernetes.io/hostname: node1 + kubernetes.io/os: linux + node-role.kubernetes.io/master: "" + node-role.kubernetes.io/worker: "" + name: node1 +spec: + bgp: + asNumber: 64512 ## asNumber根据自己需要进行修改 + + ipv4Address: 172.20.0.11/24 + routeReflectorClusterID: 172.20.0.11 ## routeReflectorClusterID一般改成自己节点的IP地址 + orchRefs: + - nodeName: node1 + orchestrator: k8s +status: + podCIDRs: + - "" + - 10.233.64.0/24 +``` + +将所有节点的 Calico 配置信息全部修改之后,通过 calicoctl get nodes -o wide 命令获取到的节点信息如下: + +``` +[root@node1 calico]# calicoctl get nodes -o wide +NAME ASN IPV4 IPV6 +node1 64512 172.20.0.11/24 +node2 64512 172.20.0.12/24 +node3 64512 172.20.0.13/24 +node4 64513 173.20.0.11/24 +node5 64513 173.20.0.12/24 +node6 64513 173.20.0.13/24 +``` + +上面可以可以看到所有的 ASN 好都已变为手动指定的,不在是全局默认的。 + +## 为 node 节点进行分组(添加 label) + +为方便让 BGPPeer 轻松选择节点,在 Kubernetes 集群中,我们需要将所有节点通过打 label 的方式进行分组,这里,我们将 label 标签分为下面几种: + +- rr-group 这里定义为节点所属的 Calico RR 组,主要有 rr1 和 rr2 两种,为不同 leaf 交换机下的 Calico RR +- rr-id 这里定义为所属 Calico RR 的 ID,节点添加了该标签说明该节点作为了路由反射器,主要有 rr1 和 rr2 两种,为不同 leaf 交换机下的 Calico RR + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210602104444.png) + +通过以下命令为每个节点添加 label: + +``` +$ kubectl label nodes node1 rr-group=rr1 +$ kubectl label nodes node1 rr-id=rr1 +``` + +查看最终设置情况: + +``` +[root@node1 calico]# kubectl get nodes --show-labels +NAME STATUS ROLES AGE VERSION LABELS +node1 Ready master,worker 31d v1.17.9 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/worker=,rr-group=rr1,rr-id=rr1 +node2 Ready master,worker 31d v1.17.9 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/worker=,rr-group=rr1,rr-id=rr1 +node3 Ready master,worker 31d v1.17.9 
beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node3,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/worker=,rr-group=rr1 +node4 Ready worker 16d v1.17.9 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node4,kubernetes.io/os=linux,node-role.kubernetes.io/worker=,rr-group=rr2,rr-id=rr2 +node5 Ready worker 16d v1.17.9 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node5,kubernetes.io/os=linux,node-role.kubernetes.io/worker=,rr-group=rr2,rr-id=rr2 +node6 Ready worker 16d v1.17.9 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node5,kubernetes.io/os=linux,node-role.kubernetes.io/worker=,rr-group=rr2,rr-id=rr2 +``` + +## 配置 BGPPeer + +在配置 BGPPeer 之前,我们可以先查看一下各个 node BGP 的节点状态,因为已经禁用了 Full-mesh,并且现在还没有配置 BGPPeer,所以所有节点里的信息都是空的。 + +``` +[root@node3 ~]# calicoctl node status +Calico process is running. + +IPv4 BGP status +No IPv4 peers found. + +IPv6 BGP status +No IPv6 peers found. +``` + +根据环境拓扑,node1 和 node2 作为 Calico RR,需要和 leaf01 交换机建立 BGP 连接;node4 和 node5 作为 Calico RR 需要和 leaf02 交换机建立 BGP 连接;node1、node2 和 node3 需要和 RR1 建立 BGP 连接;node4、node5 和 node6 需要和 RR2 建立 BGP 连接。按照下面步骤依次配置: + +1. RR1 和 leaf01 建立 BGP 连接 + +编写配置文件,取名为“rr1-to-leaf1-peer.yaml”,配置文件编辑如下: + +``` +[root@node1 calico]# cat rr1-to-leaf1-peer.yaml +apiVersion: projectcalico.org/v3 +kind: BGPPeer +metadata: + name: rr1-to-leaf1-peer ## 给BGPPeer取一个名称,方便识别 + +spec: + nodeSelector: rr-id == 'rr1' ## 通过节点选择器添加有rr-id == 'rr1'标签的节点 + + peerIP: 172.20.0.254 ## leaf01交换机的地址 + asNumber: 65009 ## leaf01交换机的AS号 +``` + +应用该配置: + +``` +[root@node1 calico]# calicoctl apply -f rr1-to-leaf1-peer.yaml +Successfully applied 1 'BGPPeer' resource(s) +``` + +2. 
RR1 和自己所属的节点建立 BGP 连接 + +RR1 所属的节点主要有 node1、node2 和 node3,也就是打了 rr-group=rr1 标签的节点,配置文件编写如下: + +``` +[root@node1 calico]# cat rr1-to-node-peer.yaml +apiVersion: projectcalico.org/v3 +kind: BGPPeer +metadata: + name: rr1-to-node-peer ## 给BGPPeer取一个名称,方便识别 + +spec: + nodeSelector: rr-group == 'rr1' ## 通过节点选择器添加有rr-group == ‘rr1’标签的节点 + + peerSelector: rr-id == 'rr1' ## 通过peer选择器添加有rr-id == ‘rr1’标签的路由反射器 +``` + +应用该配置: + +``` +[root@node1 calico]# calicoctl apply -f rr1-to-node-peer.yaml +Successfully applied 1 'BGPPeer' resource(s) +``` + +3. 在 leaf01 交换机上操作,建立 leaf01 交换机和 RR1 的 BGP 连接,交换机配置完成后,可以查看交换机 bgp peer 的连接状态 + +``` +[leaf01]show bgp peer ipv4 + + BGP local router ID: 2.2.2.2 + Local AS number: 65009 + Total number of peers: 3 Peers in established state: 3 + + * - Dynamically created peer + Peer AS MsgRcvd MsgSent OutQ PrefRcv Up/Down State + + 100.0.0.1 65008 1696 1677 0 8 23:52:28 Established + 172.20.0.11 64512 1648 1506 0 4 23:51:50 Established + 172.20.0.12 64512 1647 1659 0 4 23:51:44 Established +``` + +上面 172.20.0.11 和 172.20.0.12 是 node1 和 node2 节点,也就是 RR1。状态显示为“Established“说明 BGP 连结已建立。 + +4. RR2 和 leaf02 建立 BGP 连接 + +编写配置文件,取名为“rr2-to-leaf2-peer.yaml”,配置文件编辑如下: + +``` +[root@node1 calico]# cat rr2-to-leaf2-peer.yaml +apiVersion: projectcalico.org/v3 +kind: BGPPeer +metadata: + name: rr2-to-leaf2-peer ## 给BGPPeer取一个名称,方便识别 + +spec: + nodeSelector: rr-id == 'rr2' ## 通过节点选择器添加有rr-id == 'rr2'标签的节点 + peerIP: 173.20.0.254 ## leaf02交换机的地址 + asNumber: 65010 ## leaf02交换机的AS号 +``` + +应用该配置: + +``` +[root@node1 calico]# calicoctl apply -f rr2-to-leaf2-peer.yaml +Successfully applied 1 'BGPPeer' resource(s) +``` + +5. 
RR2 和自己所属的节点建立 BGP 连接 + +RR2 所属的节点主要有 node4、node5 和 node6,也就是打了 rr-group=rr2 标签的节点,配置文件编写如下: + +``` +[root@node1 calico]# cat rr2-to-node-peer.yaml +apiVersion: projectcalico.org/v3 +kind: BGPPeer +metadata: + name: rr2-to-node-peer ## 给BGPPeer取一个名称,方便识别 + +spec: + nodeSelector: rr-group == 'rr2' ## 通过节点选择器添加有rr-group == ‘rr2’标签的节点 + peerSelector: rr-id == 'rr2' ## 通过peer选择器添加有rr-id == ‘rr2’标签的路由反射器 +``` + +应用该配置: + +``` +[root@node1 calico]# calicoctl apply -f rr2-to-node-peer.yaml +Successfully applied 1 'BGPPeer' resource(s) +``` + +6. 在 leaf02 交换机上操作,建立 leaf02 交换机和 RR2 的 BGP 连接 + +交换机配置完成后,可以查看交换机 bgp peer 的连接状态: + +``` +sys +System View: return to User View with Ctrl+Z. +[leaf02]show bgp peer ipv4 + + BGP local router ID: 3.3.3.3 + Local AS number: 65010 + Total number of peers: 3 Peers in established state: 3 + + * - Dynamically created peer + Peer AS MsgRcvd MsgSent OutQ PrefRcv Up/Down State + + 100.0.0.5 65008 1561 1686 0 11 24:01:03 Established + 173.20.0.11 64513 1655 1650 0 2 23:59:44 Established + 173.20.0.12 64513 1661 1883 0 2 23:59:56 Established +``` + +上面 173.20.0.11 和 173.20.0.12 是 node4 和 node5 节点,也就是 RR2。状态显示为“Established“说明 BGP 连结已建立。 + +最后,我们可以通过 calicoctl get bgppeer 命令来查看所有的 BGPPeer 配置条目: + +``` +[root@node1 calico]# calicoctl get bgppeer +NAME PEERIP NODE ASN +rr1-to-leaf1-peer 172.20.0.254 rr-id == 'rr1' 65009 +rr1-to-node-peer rr-group == 'rr1' 0 +rr2-to-leaf2-peer 173.20.0.254 rr-id == 'rr2' 65010 +rr2-to-node-peer rr-group == 'rr2' 0 + +``` + +如果想删除某个 BGPPeer 条目,可以通过下面的命令: + +``` +$ calicoctl delete bgppeer rr2-to-node-peer +``` + +## 工作节点配置验证 + +至此,BGPPeer 配置已完成,可以在各个节点里再次查看 BGPPeer 状态信息。 + +在 node1 节点操作: + +``` +[root@node1 calico]# calicoctl node status +Calico process is running. 
+ +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 172.20.0.12 | node specific | up | 2021-02-16 | Established | +| 172.20.0.13 | node specific | up | 2021-02-16 | Established | +| 172.20.0.254 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` + +可以看到该节点已经和 leaf01 交换机、node2 和 node3 节点建立了 BGP 连接。 + +在 node2 节点操作: + +``` +[root@node2 ~]# calicoctl node status +Calico process is running. + +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 172.20.0.11 | node specific | up | 2021-02-16 | Established | +| 172.20.0.13 | node specific | up | 2021-02-16 | Established | +| 172.20.0.254 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` + +可以看到该节点已经和 leaf01 交换机、node1 和 node3 节点建立了 BGP 连接。 + +在 node3 节点操作: + +``` +[root@node3 ~]# calicoctl node status +Calico process is running. + +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 172.20.0.11 | node specific | up | 2021-02-16 | Established | +| 172.20.0.12 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` + +可以看到该节点已经和 node1 和 node2 节点建立了 BGP 连接,因为该节点不作为路由反射器节点,所以并为与 leaf01 交换机建立 BGP 连接。 + +在 node4 节点操作: + +``` +[root@node4 ~]# calicoctl node status +Calico process is running. 
+ +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 173.20.0.12 | node specific | up | 2021-02-16 | Established | +| 173.20.0.13 | node specific | up | 2021-02-16 | Established | +| 173.20.0.254 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` + +可以看到该节点已经和 leaf02 交换机、node5 和 node6 节点建立了 BGP 连接。 + +在 node5 节点操作: + +``` +[root@node5 ~]# calicoctl node status +Calico process is running. + +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 173.20.0.11 | node specific | up | 2021-02-16 | Established | +| 173.20.0.13 | node specific | up | 2021-02-16 | Established | +| 173.20.0.254 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` + +可以看到该节点已经和 leaf02 交换机、node4 和 node6 节点建立了 BGP 连接。 + +在 node6 节点操作: + +``` +[root@node6 ~]# calicoctl node status +Calico process is running. + +IPv4 BGP status ++--------------+---------------+-------+------------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+---------------+-------+------------+-------------+ +| 173.20.0.11 | node specific | up | 2021-02-16 | Established | +| 173.20.0.12 | node specific | up | 2021-02-16 | Established | ++--------------+---------------+-------+------------+-------------+ + +IPv6 BGP status +No IPv6 peers found. 
+``` + +可以看到该节点已经和 node4 和 node5 节点建立了 BGP 连接,因为该节点不作为路由反射器节点,所以并为与 leaf02 交换机建立 BGP 连接。 + +## 交换机配置验证 + +我们可以在所有交换机里去查看 BGP 同步的路由信息有没有署于 pod 的路由地址。 + +Spine 交换机操作: + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210602104537.png) + +Leaf01 交换机操作: + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210602104556.png) + +Leaf02 交换机操作: + +![](https://pek3b.qingstor.com/kubesphere-community/images/20210602104612.png) + +在上面交换机操作截图中,10.233 开头的地址段都是 pod 地址段的路由信息。 + +## 将 Service 地址路由同步到物理网络 + +有些时候不光需要 Pod 地址可以在现网可被路由,Service 地址也会有这个需求,我们可以通过修改 bgpconfig 配置来实现 Service 地址的路由同步。 + +首先检查是否具有默认的 BGP 配置: + +``` +[root@node1 ~]# calicoctl get bgpconfig default +NAME LOGSEVERITY MESHENABLED ASNUMBER +default Info false 64512 +``` + +默认的 BGP 配置是存在的,更新 BGP 配置: + +``` +[root@node1 ~]# calicoctl patch BGPConfig default --patch \ +> '{"spec": {"serviceClusterIPs": [{"cidr": "10.233.0.0/18"}]}}' +Successfully patched 1 'BGPConfiguration' resource +``` + +注意将上面 10.233.0.0./18 地址段修改为 Service 的地址段。 + +上述配置完成之后,便可以在交换机里看到已经同步过来的 Service 地址段的路由信息。 + +## 文档参考链接 + +绝大多数配置都可以通过 Calico 官方文档获取,以下就是撰写本文参考的主要官方文档链接: + +- [https://docs.projectcalico.org/networking/bgp](https://docs.projectcalico.org/networking/bgp "https://docs.projectcalico.org/networking/bgp") +- [https://docs.projectcalico.org/getting-started/clis/calicoctl/install](https://docs.projectcalico.org/getting-started/clis/calicoctl/install "https://docs.projectcalico.org/getting-started/clis/calicoctl/install") +- [https://docs.projectcalico.org/networking/advertise-service-ips#advertise-service-cluster-ip-addresses](https://docs.projectcalico.org/networking/advertise-service-ips#advertise-service-cluster-ip-addresses "https://docs.projectcalico.org/networking/advertise-service-ips#advertise-service-cluster-ip-addresses") diff --git a/content/zh/blogs/calico-wireguard-support-with-azure-cni.md b/content/zh/blogs/calico-wireguard-support-with-azure-cni.md new file mode 100644 index 000000000..6b7e67249 
--- /dev/null +++ b/content/zh/blogs/calico-wireguard-support-with-azure-cni.md @@ -0,0 +1,107 @@ +--- +title: '在 Azure CNI 中启用 Calico WireGuard' +tag: 'Kubernetes,Calico,Azure' +keywords: 'Kubernetes, Calico, Azure, AKS, WireGuard' +description: '本文描述了在 Kubernetes 中使用 WireGuard 的解决方案' +createTime: '2021-09-08' +author: 'Peter Kelly' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/%E5%9C%A8%20Azure%20CNI%20%E4%B8%AD%E5%90%AF%E7%94%A8%20Calico%20WireGuard.png' +--- + +> 原文链接:[https://thenewstack.io/calico-wireguard-support-with-azure-cni/](https://thenewstack.io/calico-wireguard-support-with-azure-cni/) + +> **作者:Peter Kelly**
    +> **译者:田璧州**
    +> **注:本文已取得作者本人的翻译授权** + +去年 6 月,[Tigera](https://www.tigera.io/?utm_content=inline-mention) 宣布首次在 k8s 上支持用于集群内加密传输的开源 VPN,[WireGuard](https://www.wireguard.com/) 。我们从来不喜欢坐以待毙,所以我们一直在努力为这项技术开发一些令人兴奋的新功能,其中第一个功能是使用 [Azure 容器网络接口](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) (CNI) 在 [Azure Kubernetes 服务](https://azure.microsoft.com/en-us/services/kubernetes-service/) (AKS) 上支持 WireGuard。 + +首先,这里简单回顾一下什么是 WireGuard 以及我们如何在 Calico 中使用它。 + +WireGuard 是一种 VPN 技术,从 linux 5.6 内核开始默认包含在内核中,它被定位为 IPsec 和 OpenVPN 的替代品。它的目标是更加快速、安全、易于部署和管理。正如不断涌现的 SSL/TLS 的漏洞显示,密码的敏捷性会极大增加复杂性,这与 WireGuard 的目标不符,为此,WireGuard 故意将密码和算法的配置灵活性降低,以减少该技术的可攻击面和可审计性。它的目标是更加简单快速,所以使用标准的 Linux 网络命令便可以很容易的对它进行配置,并且只有约 4000 行代码,使得它的代码可读性高,容易理解和接受审查。 + +WireGuard 是一种 VPN 技术,通常被认为是 C/S 架构。它同样能在端对端的网格网络架构中配置使用,这就是 Tigera 设计的 WireGuard 可以在 Kubernetes 中启用的解决方案。使用 Calico,所有启用 WireGuard 的节点将端对端形成一个加密的网格。Calico 甚至支持在同一集群内同时包含启用 WireGuard 的节点与未启用 WireGuard 的节点,并且可以相互通信。 + +![](https://cdn.jsdelivr.net/gh/kubesphere-sigs/awesome-cloud-native-blogs@master/2021/08/calico-wireguard-support-with-azure-cni/1.1.png) + +我们选择 WireGuard 并不是一个折中的方案。我们希望提供最简单、最安全、最快速的方式来加密传输 Kubernetes 集群中的数据,而无需使用 mTLS、IPsec 或其他复杂的配置。事实上,您可以把 WireGuard 看成是另一个具有加密功能的 Overlay。 + +用户只需一条命令就可以启用 WireGuard,而 Calico 负责完成剩余的工作,包括: + +- 在每个节点创建 WireGuard 的网络接口 +- 计算并编写最优的 MTU +- 为每个节点创建 WireGuard 公钥私钥对 +- 向每个节点添加公钥,以便在集群中共享资源 +- 为每个节点编写端对端节点 +- 使用防火墙标记(fwmark)编写 IP route、IP tables 和 Routing tables,以此正确处理各自节点上的路由 + +您仅需指明意图,其他的事情都由集群完成 + +## 使用 WireGuard 时的数据包流向 + +下图显示了启用 WireGuard 后集群中的各种数据包流量情况。 + +![](https://cdn.jsdelivr.net/gh/kubesphere-sigs/awesome-cloud-native-blogs@master/2021/08/calico-wireguard-support-with-azure-cni/2.1.png) + +同一主机上的 Pod: + +- 数据包被路由到 WireGuard 表。 +- 如果目标 IP 是同一主机上的 Pod,Calico 则在 WireGuard 路由表中插入一个 “ throw ” 条目,将数据包引导回主路由表。由此,数据包将被定向到目标 Pod 匹配的 veth 接口,并且它将在未加密的情况下流动(在图中以绿色显示)。 + +不同节点上的 Pod: + +- 数据包被路由到 WireGuard 表。 + +- 路由条目与目标 Pod IP 匹配并发送到 WireGuard 组件: cali.wireguard + +- 
WireGuard 组件加密并封装数据包(在图中以红色显示)并设置 fwmark 以防止路由环路。 + +- WireGuard 组件使用与目标 Pod IP(允许的 IP)匹配的对等方的公钥对数据包进行加密,将其封装在 UDP 中,并使用特殊的 fwmark 对其进行标记以防止路由环路。 + +- 数据包通过 eth0 发送到目标节点并解密。 + +- 这也适用于主机流量(例如,节点联网的 Pod)。 + +在以下动画中,您可以看到 3 种流量: + +1. 同一主机上 Pod 到 Pod 未被加密的流量。 +2. 不同主机上的 Pod 到 Pod 被加密的流量。 +3. 主机到主机的流量也会被加密。 + +> 注意:绿色表示未加密流量,红色表示加密流量。 + ++ [动画演示](https://tigera.wistia.com/medias/ddl8bmhpgp?utm_source=thenewstack&utm_medium=website&utm_campaign=platform) + +## WireGuard 在 AKS 的应用 + +在 AKS 上使用 Azure CNI 支持使用 WireGuard 带来了一些非常有趣的挑战。 + +首先,使用 Azure CNI 意味着不使用 Calico IPAM( IP 地址管理)和 CIDR(无类域间路由)块来分配 Pod IP 。相反,它们是采用与节点 IP 相同的分配方式从底层 VNet 进行分配。这对 WireGuard 路由来说是一个有趣的挑战,以往我们可以在 WireGuard 配置中的 Allowed IPs 列表中添加一个 CIDR 块,相比之下,我们现在必须写出该节点上所有 Pod IP。这需要 Calico 将 routeSource 的配置设为 workloadIPs。如果您使用的是我们的 operator 方式部署 AKS 集群,便无需额外配置。 + +使用 wireguard-tools 中优秀的工具 [wg](https://git.zx2c4.com/wireguard-tools/about/src/man/wg.8),可以查看集群内端对端节点允许通过的 IP 列表,其中包括每个节点的 Pod IP 和主机 IP(注意终端 IP 也在允许 IP 列表中)。在 AKS 集群上,WireGuard 提供了业务流量加密和主机到主机的加密。 + +```shell + interface: wireguard.cali + public key: bbcKpAY+Q9VpmIRLT+yPaaOALxqnonxBuk5LRlvKClA= + private key: (hidden) + listening port: 51820 + fwmark: 0x100000 + + peer: /r0PzTX6F0ZrW9ExPQE8zou2rh1vb20IU6SrXMiKImw= + endpoint: 10.240.0.64:51820 + allowed ips: 10.240.0.64/32, 10.240.0.65/32, 10.240.0.66/32 + latest handshake: 11 seconds ago + transfer: 1.17 MiB received, 3.04 MiB sent + + peer: QfUXYghyJWDcy+xLW0o+xJVsQhurVNdqtbstTsdOp20= + endpoint: 10.240.0.4:51820 + allowed ips: 10.240.0.4/32, 10.240.0.5/32, 10.240.0.6/32 + latest handshake: 46 seconds ago + transfer: 83.48 KiB received, 365.77 KiB sent +``` + +第二个挑战是正确处理 MTU(最大传输单元)。[Azure 设置的 MTU 是 1500](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu),而 WireGuard 在数据包上设置了一个 DF(Don't Fragment)标记。如果没有正确调整 WireGuard MTU,我们会在启用 WireGuard 时发现有丢包和低带宽。我们可以在 AKS 中通过 Calico 设置为自动检测,从而为 WireGuard 配置正确的开销和 
[MTU](https://docs.projectcalico.org/networking/mtu) 来优化。 + +我们还可以将节点 IP 本身添加为端对端节点允许通信的 IP ,并通过 AKS 中的 WireGuard 处理主机联网的 Pod 和主机到主机通信。主机到主机通信的方法是,当 [RPF](https://en.wikipedia.org/wiki/Reverse-path_forwarding)(反向路径转发)发生时,通过 WireGuard 接口获得路由返回的响应。通过在目的节点的接收数据包上设置一个标记,并配置内核确保遵守 sysctl 中的 RPF 标记来解决这个问题。 + +现在您使用 AKS 时,节点之间的业务流量和主机到主机通信都会被加密。您仅需指明意图,其他的事情都由集群完成。 \ No newline at end of file diff --git a/content/zh/blogs/changhong-kubernetes-autoscaling-canaryrelease.md b/content/zh/blogs/changhong-kubernetes-autoscaling-canaryrelease.md index ada0a3d9b..30a5183a6 100644 --- a/content/zh/blogs/changhong-kubernetes-autoscaling-canaryrelease.md +++ b/content/zh/blogs/changhong-kubernetes-autoscaling-canaryrelease.md @@ -107,13 +107,13 @@ CA 依赖于云平台的能力,而通过设置 HPA 可以触发 CA。在 KubeS 由于我们是先部署好 Kubernetes,在其上再安装 KubeSphere的,那么我们需要在安装后的 KubeSphere 启用该功能。 启用方法: -1. 在 KubeSphere 的集群管理界面,找到自定义资源 CRD +1. 在 KubeSphere 的集群管理界面,找到 CRD 2. 打开后输入 `clusterconfiguration` 进行搜索 3. 点击搜索结果,在打开的页面中点击 ks-installer 右侧的按钮,选择编辑配置文件 4. 在打开的 YAML 文件里找到 Metrics Server,在 enabled 一行将 false 更改为 true 5. 之后点击右下角的更新即可 -![自定义资源 CRD](https://pek3b.qingstor.com/kubesphere-community/images/CRD-changhong.png) +![ CRD](https://pek3b.qingstor.com/kubesphere-community/images/CRD-changhong.png) ![修改 YAML 文件](https://pek3b.qingstor.com/kubesphere-community/images/yaml-changhong.png) diff --git a/content/zh/blogs/cilium-1.11-release.md b/content/zh/blogs/cilium-1.11-release.md new file mode 100644 index 000000000..1ae6596b2 --- /dev/null +++ b/content/zh/blogs/cilium-1.11-release.md @@ -0,0 +1,307 @@ +--- +title: 'Cilium 1.11 发布,带来内核级服务网格、拓扑感知路由....' 
+tag: 'Cilium' +keywords: 'Cilium, Service Mesh, Istio, Kubernetes' +description: '几天前,我们发布了具有诸多新功能的 Cilium 1.11 版本,这是一个令人兴奋的版本。诸多新功能中也包括了万众期待的 Cilium Service Mesh 的 Beta 版本。在本篇文章中,我们将深入探讨其中的部分新功能。' +createTime: '2021-12-14' +author: 'Cilium 母公司 Isovalent 团队' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202112141058286.png' +--- + +> 原文链接: https://isovalent.com/blog/post/2021-12-release-111 + +> **作者:Cilium 母公司 Isovalent 团队** +> **译者:范彬,狄卫华,米开朗基杨** +> +> 注:本文已取得作者本人的翻译授权! + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112141058286.png) + +Cilium 项目已逐渐成为万众瞩目之星,我们很自豪能够成为该项目的核心人员。几天前,我们发布了具有诸多新功能的 Cilium 1.11 版本,这是一个令人兴奋的版本。诸多新功能中也包括了万众期待的 Cilium Service Mesh 的 Beta 版本。在本篇文章中,我们将深入探讨其中的部分新功能。 + +## Service Mesh 测试版本(Beta) + +在探讨 1.11 版本之前,让我们先了解一下 Cilium 社区宣布的 Service Mesh 的新功能。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112141059276.png) + +- **基于 eBPF 技术的 Service Mesh (Beta)版本**: 定义了新的服务网格能力,这包括 L7 流量管理和负载均衡、TLS 终止、金丝雀发布、追踪等诸多能力。 +- **集成了 Kubernetes Ingress (Beta) 功能**: 通过将 eBPF 和 Envoy 的强势联合,实现了对 Kubernetes Ingress 的支持。 + +Cilium 网站的一篇文章详细介绍了[Service Mesh Beta 版本](https://cilium.io/blog/2021/12/01/cilium-service-mesh-beta),其中也包括了如何参与到该功能的开发。当前,这些 Beta 功能是 Cilium 项目中的一部分,在单独[分支](https://github.com/cilium/cilium/tree/beta/service-mesh)进行开发,可独立进行测试、反馈和修改,我们期待在 2022 年初 Cilium 1.12 版本发布之前合入到 Cilium 主分支。 + +## Cilium 1.11 + +Cilium 1.11 版本包括了 Kubernetes 额外功能,及独立部署的负载均衡器。 + +- **OpenTelemetry 支持**:Hubble L3-L7 可观测性数据支持 OpenTelemetry 跟踪和度量(Metrics)格式的导出。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#opentelemetry)) +- **Kubernetes APIServer 策略匹配**:新的策略实体用于简单方便地创建进出 Kubernetes API Server 流量的策略模型。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#kubernetes-apiserver-policy-matching)) +- **拓扑感知的路由**:增强负载均衡能力,基于拓扑感知将流量路由到最近的端点,或保持在同一个地区(Region)内。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#topology-aware-hints)) +- **BGP 宣告 Pod CIDR**:使用 BGP 在网络中宣告 Pod CIDR 的 IP 
路由。([更多详情](https://isovalent.com/blog/post/2021-12-release-111#bgp-pod-cidr-announcements)) +- **服务后端流量的优雅终止**:支持优雅的连接终止,通过负载均衡向终止的 Pod 发送的流量可以正常处理后终止。([更多详情](https://isovalent.com/blog/post/2021-12-release-111#graceful-termination)) +- **主机防火墙稳定版**:主机防火墙功能已升级为生产可用的稳定版本。([更多详情](https://isovalent.com/blog/post/2021-12-release-111#feature-status)) +- **提高负载均衡器扩展性**:Cilium 负载均衡支持超过 64K 的后端端点。([更多详情](https://isovalent.com/blog/post/2021-12-release-111#service-backend-scalability)) +- **提高负载均衡器设备支持**:负载均衡的加速 XDP 快速路径现在支持 bond 设备([更多详情](https://isovalent.com/blog/post/2021-12-release-111#transparent-xdp-bonding-support)) 同时,可更普遍地用于多设备设置。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#xdp-multi-dev))。 +- **Kube-Proxy-replacement 支持 istio**:Cilium 的 kube-proxy 替代模式与 Istio sidecar 部署模式兼容。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#istio-kpr)) +- **Egress 出口网关的优化**:Egress 网关能力增强,支持其他数据路径模式。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#egress-gateway-improvements)) +- **托管 IPv4/IPv6 邻居发现**:对 Linux 内核和 Cilium 负载均衡器进行了扩展,删除了其内部 ARP 库,将 IPv4 的下一跳发现以及现在的 IPv6 节点委托给内核管理。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#managed-ipv4-ipv6-discovery)) +- **基于路由的设备检测**:外部网络设备基于路由的自动检测,以提高 Cilium 多设备设置的用户体验。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#route-based-device-detection)) +- **Kubernetes Cgroup 增强**:在 cgroup v2 模式下集成了 Cilium 的 kube-proxy-replacement 功能,同时,对 cgroup v1/v2 混合模式下的 Linux 内核进行了增强。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#kubernetes-cgroup)) +- **Cilium Endpoint Slices**:Cilium 基于 CRD 模式能够更加高效地与 Kubernetes 的控制平面交互,并且不需要专有 ETCD 实例,节点也可扩展到 1000+。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#cilium-endpoint-slices)) +- **集成 Mirantis Kubernetes 引擎**:支持 Mirantis Kubernetes 引擎。 ([更多详情](https://isovalent.com/blog/post/2021-12-release-111#mke-integration)) + +## 什么是 Cilium ? 
+ +Cilium 是一个开源软件,为基于 Kubernetes 的 Linux 容器管理平台上部署的服务,透明地提供服务间的网络和 API 连接及安全。 + +Cilium 底层是基于 Linux 内核的新技术 eBPF,可以在 Linux 系统中动态注入强大的安全性、可见性和网络控制逻辑。 Cilium 基于 eBPF 提供了多集群路由、替代 kube-proxy 实现负载均衡、透明加密以及网络和服务安全等诸多功能。除了提供传统的网络安全之外,eBPF 的灵活性还支持应用协议和 DNS 请求/响应安全。同时,Cilium 与 Envoy 紧密集成,提供了基于 Go 的扩展框架。因为 eBPF 运行在 Linux 内核中,所以应用所有 Cilium 功能,无需对应用程序代码或容器配置进行任何更改。 + +请参阅 **[Cilium 简介]** 部分,了解 Cilium 更详细的介绍。 + +## OpenTelemetry 支持 + +![Jaeger UI 显示 Hubble 数据流](https://pek3b.qingstor.com/kubesphere-community/images/202112141059371.png) + +新版本增加了对 [OpenTelemetry](https://opentelemetry.io/) 的支持。 + +OpenTelemetry 是一个 CNCF 项目,定义了遥测协议和数据格式,涵盖了分布式跟踪、指标和日志。该项目提供了 SDK 和运行在 Kubernetes 上的收集器。通常,应用程序直接检测暴露 OpenTelemetry 数据,这种检测最常使用 OpenTelemetry SDK 在应用程序内实现。OpenTelemetry 收集器用于从集群中的各种应用程序收集数据,并将其发送到一个或多个后端。CNCF 项目 [Jaeger](https://jaegertracing.io/) 是可用于存储和呈现跟踪数据的后端之一。 + +[支持 OpenTelemetry 的 Hubble 适配器](https://github.com/cilium/hubble-otel)是一个附加组件,可以部署到运行 Cilium 的集群上(Cilium 版本最好是 1.11,当然也应该适用于旧版本)。该适配器是一个嵌入了 Hubble 接收器的 OpenTelemetry 收集器,我们推荐使用 [OpenTelemetry Operator](https://github.com/open-telemetry/opentelemetry-operator) 进行部署(详见[用户指南](https://github.com/cilium/hubble-otel/blob/main/USER_GUIDE_KIND.md))。Hubble 适配器从 Hubble 读取流量数据,并将其转换为跟踪和日志数据。 + +Hubble 适配器添加到支持 OpenTelemetry 的集群中,可对网络事件和应用级别遥测提供有价值的可观测性。当前版本通过 OpenTelemetry SDK 提供了 HTTP 流量和 spans 的关联。 + +## 感知拓扑的负载均衡 + +![感知拓扑的负载均衡](https://pek3b.qingstor.com/kubesphere-community/images/202112141059950.png) + +Kubernetes 集群在跨多数据中心或可用区部署是很常见的。这不仅带来了高可用性好处,而且也带来了一些操作上的复杂性。 + +目前为止,Kubernetes 还没有一个内置的结构,可以基于拓扑级别描述 Kubernetes service 端点的位置。这意味着,Kubernetes 节点基于服务负载均衡决策选择的 service 端点,可能与请求服务的客户在不同的可用区。 这种场景下会带来诸多副作用,可能是云服务费用增加,通常由于流量跨越多个可用区,云提供商会额外收取费用,或请求延迟增加。更广泛地说,我们需要根据拓扑结构定义 service 端点的位置, 例如,服务流量应该在同一节点(node)、同一机架(rack)、同一故障分区(zone)、同一故障地区(region)、同云提供商的端点之间进行负载均衡。 + +Kubernetes v1.21 引入了一个称为[拓扑感知路由](https://kubernetes.io/docs/concepts/services-networking/topology-aware-hints)的功能来解决这个限制。通过将 `service.kubernetes.io/topology-aware-hints` 
注解设置为 `auto`,在 service 的 EndpointSlice 对象中设置端点提示
64512 + my-asn: 64512 +``` + +同时,Cilium 应该使用以下参数进行安装 : + +```bash +$ cilium install \ + --config="bgp-announce-pod-cidr=true" +``` + +Cilium 安装完后,它将会向 BGP 路由器发布 Pod CIDR 范围,即 `192.168.1.11`。 + +下面是最近的 Cilium [eCHO episode](https://www.youtube.com/watch?v=nsfbFUO8eu4&t=668s) 完整演示视频。 + +- https://www.youtube.com/watch?v=nsfbFUO8eu4 + +如果想了解更多,如:如何为 Kubernetes service 配置 LoadBalancer IP 宣告,如何通过 BGP 发布节点的 Pod CIDR 范围,请参见 [docs.cilium.io](https://docs.cilium.io/en/latest/gettingstarted/bgp/)。 + +## 托管 IPv4/IPv6 邻居发现 + +![托管邻居发现的示意图](https://pek3b.qingstor.com/kubesphere-community/images/202112141100742.png) + +当 Cilium 启用 eBPF 替代 kube-proxy 时,Cilium 会执行集群节点的邻居发现,以收集网络中直接邻居或下一跳的 L2 地址。这是服务负载平衡所必需的,[eXpress Data Path](https://cilium.io/blog/2020/06/22/cilium-18#kube-proxy-replacement-at-the-xdp-layer) (XDP) 快速路径支持每秒数百万数据包的可靠高流量速率。在这种模式下,在技术上按需动态解析是不可能的,因为它需要等待相邻的后端被解析。 + +在 Cilium 1.10 及更早版本中,cilium agent 本身包含一个 ARP 解析库,其控制器触发发现和定期刷新新增集群节点。手动解析的邻居条目被推送到内核并刷新为 PERMANENT 条目,eBPF 负载均衡器检索这些条目,将流量定向到后端。cilium agent 的 ARP 解析库缺乏对 IPv6 邻居解析支持,并且,PERMANENT 邻居条目还有许多问题:举个例子,条目可能变得陈旧,内核拒绝学习地址更新,因为它们本质上是静态的,在某些情况下导致数据包在节点间被丢弃。此外,将邻居解析紧密耦合到 cilium agent 也有一个缺点,在 agent 启停周期时,不会发生地址更新的学习。 + +在 Cilium 1.11 中,邻居发现功能已被完全重新设计,Cilium 内部的 ARP 解析库已从 agent 中完全移除。现在 agent 依赖于 Linux 内核来发现下一跳或同一 L2 域中的主机。Cilium 现在可同时支持 IPv4 和 IPv6 邻居发现。对于 v5.16 或更新的内核,我们已经将今年 Linux Plumbers 会议期间共同组织的 [BPF & Networking Summit](https://lore.kernel.org/netdev/20211011121238.25542-1-daniel@iogearbox.net/) 上,提出的“管理”邻居条目工作提交到上游([part1](https://lore.kernel.org/netdev/20211011121238.25542-1-daniel@iogearbox.net/), [part2](https://lore.kernel.org/netdev/20211013132140.11143-1-daniel@iogearbox.net/), [part3](https://lore.kernel.org/netdev/20211025154728.2161-1-daniel@iogearbox.net/))。在这种情况下,agent 将新增集群节点的 L3 地址下推,并触发内核定期自动解析它们对应的 L2 地址。 + +这些邻居条目作为“外部学习”和“管理”邻居属性,基于 netlink 被推送到内核中。虽然旧属性确保在压力下这些邻居条目不会被内核的垃圾收集器处理,但是 "管理" 邻居属性会,在可行的情况下,内核需要自动将这些邻居属性保持在 `REACHABLE` 状态。这意思是,如果节点的上层堆栈没有主动向后端节点发送或接收流量,内核可以重新学习,将邻居属性保持在 `REACHABLE` 
状态,然后通过内部内核工作队列定期触发显式邻居解析。对于没有 "管理" 邻居属性功能的旧内核,如果需要,agent controller 将定期督促内核触发新的解决方案。因此,Cilium 不再有 `PERMANENT` 邻居条目,并且在升级时,agent 将自动将旧条目迁移到动态邻居条目中,以使内核能够在其中学习地址更新。 + +此外,在多路径路由的情况下,agent 会做负载均衡,它现在可以在路由查找中查看失败的下一跳。这意味着,不是替代所有的路由,而是通过查看相邻子系统信息来避免失败的路径。总的来说,对于 Cilium agent 来说,这项修正工作显著促进了邻居管理,并且在网络中的节点或下一跳的邻居地址发生变化时,数据路径更易变化。 + +## XDP 多设备负载均衡器 + +![XDP多设备负载均衡器的示意图](https://pek3b.qingstor.com/kubesphere-community/images/202112141100694.png) + +在此版本前,基于 XDP 的负载均衡器加速只能在单个网络设备上启用,以发夹方式(hair-pinning)运行,即数据包转发离开的设备与数据包到达的设备相同。这个最初的限制是在[基于 XDP 层的 kube-proxy 替代](https://cilium.io/blog/2020/06/22/cilium-18#kube-proxy-replacement-at-the-xdp-layer)加速中加入的,原因是在 XDP(`XDP_REDIRECT`)下对多设备转发的驱动支持有限,而同设备转发(`XDP_TX`)是 Linux 内核中每个驱动的 XDP 都支持的。 + +这意味着多网络设备的环境下,我们需要使用 tc eBPF 机制,所以必须使用 Cilium 的常规 kube-proxy 替代。这种环境的一个典型例子是有两个网络设备的主机,其中一个是公网,接受来自外部对 Kubernetes service 的请求,而另一个是私有网络,用于 Kubernetes 节点之间的集群内通信。 + +由于在现代 LTS Linux 内核上,绝大多数 40G 和 100G 以上的上游网卡驱动都支持开箱即用的 `XDP_REDIRECT`,这种限制终于可以解除,因此,这个版本在 Cilium 的 kube-proxy 替代,及 Cilium 的独立负载均衡器上,实现了 XDP 层的多网络设备的负载均衡,这使得在更复杂的环境中也能保持数据包处理性能。 + +## XDP 透明支持 bond 设备 + +[![XDP透明支持 bond 设备](https://kubesphere-community.pek3b.qingstor.com/images/images/lb-xdp-bond.gif)](https://asciinema.org/a/zphe3HfuwbWb6Vu6Un3aTAbRj) + +在很多企业内部或云环境中,节点通常使用 bond 设备,设置外部流量的双端口网卡。随着最近 Cilium 版本的优化,如在 XDP 层的[ kube-proxy 替代](https://cilium.io/blog/2020/06/22/cilium-18#kube-proxy-replacement-at-the-xdp-layer) 或[独立负载均衡器](https://cilium.io/blog/2021/05/20/cilium-110#standalonelb),我们从用户那里经常收到的一个问题是 XDP 加速是否可以与 bond 网络设备结合使用。虽然 Linux 内核绝大多数 10/40/100Gbit/s 网络驱动程序支持 XDP,但它缺乏在 bond(和 802.3ad)模式下透明操作 XDP 的能力。 + +其中的一个选择是在用户空间实现 802.3ad,并在 XDP 程序实现 bond 负载均衡,但这对 bond 设备管理是一个相当颇为繁琐努力,例如:对 netlink 链路事件的观测,另外还需要为编排器的本地和 bond 分别提供单独的程序。相反,本地内核实现解决了这些问题,提供了更多的灵活性,并且能够处理 eBPF 程序,而不需要改变或重新编译它们。内核负责管理 bond 设备组,可以自动传播 eBPF 程序。对于 v5.15 或更新的内核,我们已经在上游([part1](https://lore.kernel.org/bpf/20210731055738.16820-1-joamaki@gmail.com/), 
[part2](https://lore.kernel.org/netdev/20210906085638.1027202-1-joamaki@gmail.com/))实现了 XDP 对 bond 设备的支持。 + +当 XDP 程序连接到 bond 设备时,`XDP_TX` 的语义等同于 tc eBPF 程序附加到 bond 设备,这意味着从 bond 设备传输数据包使用 bond 配置的传输方法来选择从属设备。故障转移和链路聚合模式均可以在 XDP 操作下使用。对于通过 `XDP_TX` 将数据包从 bond 设备上回传,我们实现了轮循、主动备份、802.3ad 以及哈希设备选择。这种情况对于像 Cilium 这样的发夹式负载均衡器来说特别有意义。 + +## 基于路由的设备检测 + +1.11 版本显著的提升了设备的自动检测,可用于[ 使用 eBPF 替代 kube-proxy](https://cilium.io/blog/2020/06/22/cilium-18#kube-proxy-replacement-at-the-xdp-layer)、[带宽管理器](https://cilium.io/blog/2020/11/10/cilium-19#bwmanager)和[主机防火墙](https://cilium.io/blog/2020/06/22/cilium-18#policy)。 + +在早期版本中,Cilium 自动检测的设备需要有默认路由的设备,和有 Kubernetes NodeIP 的设备。展望未来,现在设备检测是根据主机命名空间的所有路由表的条目来进行的。也就是说,所有非桥接的、非 bond 的和有全局单播路由的非虚拟的设备,现在都能被检测到。 + +通过这项改进,Cilium 现在应该能够在更复杂的网络设置中自动检测正确的设备,而无需使用 `devices` 选项手动指定设备。使用后一个选项时,无法对设备名称进行一致性的命名规范,例如:无法使用共同前缀正则表达式对设备命名。 + +## 服务后端流量的优雅终止 + +![优雅终止的示意图](https://kubesphere-community.pek3b.qingstor.com/images/images/2021-11-cilium-11-graceful.gif) + +Kubernetes 可以出于多种原因终止 Pod,如滚动更新、缩容或用户发起的删除。在这种情况下,重要的是要优雅地终止与 Pod 的活跃连接,让应用程序有时间完成请求以最大程度地减少中断。异常连接终止会导致数据丢失,或延迟应用程序的恢复。 + +Cilium agent 通过 "EndpointSlice" API 监听 service 端点更新。当一个 service 端点被终止时,Kubernetes 为该端点设置 `terminating` 状态。然后,Cilium agent 删除该端点的数据路径状态,这样端点就不会被选择用于新的请求,但该端点正在服务的当前连接,可以在用户定义的宽限期内被终止。 + +同时,Kubernetes 告知容器运行时向服务的 Pod 容器发送 `SIGTERM` 信号,并等待终止宽限期的到来。然后,容器应用程序可以启动活跃连接的优雅终止,例如,关闭 TCP 套接字。一旦宽限期结束,Kubernetes 最终通过 `SIGKILL` 信号对仍在 Pod 容器中运行的进程触发强制关闭。这时,agent 也会收到端点的删除事件,然后完全删除端点的数据路径状态。但是,如果应用 Pod 在宽限期结束前退出,Kubernetes 将立即发送删除事件,而不管宽限期设置。 + +更多细节请关注 [docs.cilium.io](https://docs.cilium.io/en/v1.11/gettingstarted/kubeproxy-free/#graceful-termination) 中的指南。 + +## Egress 出口网关的优化 + +![Egress 出口网关的优化](https://pek3b.qingstor.com/kubesphere-community/images/202112141104906.png) + +简单的场景中,Kubernetes 应用只与其他 Kubernetes 应用进行通信,因此流量可通过网络策略等机制进行控制。但现实世界情况并非总是如此,例如:私有部署的一些应用程序没有被容器化,Kubernetes 应用程序需要与集群外的服务进行通信。这些传统服务通常配置的是静态 IP,并受到防火墙规则的保护。那么在此种情况下,应该如何对流量控制和审计呢? 
+ +Egress 出口 IP 网关功能在 [Cilium 1.10](https://cilium.io/blog/2021/05/20/cilium-110#egressgateway) 中被引入,通过 Kubernetes 节点充当网关用于集群出口流量来解决这类问题。用户使用策略来指定哪些流量应该被转发到网关节点,以及如何转发流量。这种情况下,网关节点将使用静态出口 IP 对流量进行伪装,因此可以在传统防火墙建立规则。 + +```yaml +apiVersion: cilium.io/v2alpha1 +kind: CiliumEgressNATPolicy +metadata: + name: egress-sample +spec: + egress: + - podSelector: + matchLabels: + app: test-app + destinationCIDRs: + - 1.2.3.0/24 + egressSourceIP: 20.0.0.1 +``` + +在上面的示例策略中,带有 `app: test-app` 标签的 Pod 和目标 CIDR 为 `1.2.3.0/24` 的流量,需要通过 `20.0.0.1` 网关节点的出口 IP(SNAT)与集群外部通信。 + +在 Cilium 1.11 开发周期中,我们投入了大量精力来稳定出口网关功能,使其可投入生产。现在, +出口网关现在可以工作在[直接路由](https://github.com/cilium/cilium/pull/17517),[区分内部流量](https://github.com/cilium/cilium/pull/17639)(即 Kubernetes 重叠地址的 CIDR 的出口策略)及[在不同策略中使用相同出口 IP](https://github.com/cilium/cilium/pull/17773)下。一些问题,如[回复被错误描述为出口流量](https://github.com/cilium/cilium/pull/17869)和[其他](https://github.com/cilium/cilium/pull/17813)等已经修复,同时测试也得到了改进,以便及早发现潜在的问题。 + +## Kubernetes Cgroup 增强 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112111457114.png) + +Cilium 使用 [ eBPF 替代 kube-proxy ](https://cilium.io/blog/2020/06/22/cilium-18#kube-proxy-replacement-at-the-xdp-layer) 作为[独立负载均衡器](https://cilium.io/blog/2021/05/20/cilium-110#standalonelb)的优势之一是能够将 eBPF 程序附加到 socket hooks 上,例如 `connect(2)`、`bind(2)`、`sendmsg(2)` 以及其他各种相关的系统调用,以便透明地将本地的应用程序连接到后端服务。但是这些程序只能被附加到 cgroup v2。虽然 Kubernetes 正在努力迁移到 cgroup v2,但目前绝大多数用户的环境都是 cgroup v1 和 v2 混合使用。 + +Linux 在内核的 socket 对象中标记了 socket 与 cgroup 的关系,并且由于 6 年前的一个设定,cgroup v1 和 v2 的 socket 标签是互斥的。这就意味着,如果一个 socket 是以 cgroup v2 成员身份创建的,但后来通过具有 cgroup v1 成员身份的 `net_prio` 或 `net_cls` 控制器进行了标记,那么 cgroup v2 就不会执行附加在 Pod 子路径上的程序,而是回退执行附加到 cgroup v2 层次结构 (hierarchy) 根部的 eBPF 程序。这样就会导致一个很严重的后果,如果连 cgroup v2 根部都没有附加程序,那么整个 cgroup v2 层次结构 (hierarchy) 都会被绕过。 + +现如今,cgroup v1 和 v2 不能并行运行的假设不再成立,具体可参考今年早些时候的 [Linux Plumbers 会议演讲](https://linuxplumbersconf.org/event/11/contributions/953/)。只有在极少数情况下,当被标记为 cgroup v2 成员身份的 eBPF 程序附加到 
cgroup v2 层次结构的子系统时,Kubernetes 集群中的 cgroup v1 网络控制器才会绕过该 eBPF 程序。为了在数据包处理路径上的尽量前面(early)的位置解决这个问题,Cilium 团队最近对 Linux 内核进行了修复,实现了在所有场景下允许两种 cgroup 版本 ([part1](https://lore.kernel.org/bpf/20210913230759.2313-1-daniel@iogearbox.net/), [part2](https://lore.kernel.org/bpf/20210927123921.21535-1-daniel@iogearbox.net/)) 之间相互安全操作。这个修复不仅使 Cilium 的 cgroup 操作完全健壮可靠,而且也造福了 Kubernetes 中所有其他 eBPF cgroup 用户。 + +此外,Kubernetes 和 Docker 等容器运行时最近开始陆续宣布支持 cgroup v2。在 cgroup v2 模式下,Docker 默认会切换到[私有 cgroup 命名空间](https://docs.docker.com/config/containers/runmetrics/#running-docker-on-cgroup-v2),即每个容器(包括 Cilium)都在自己的私有 cgroup 命名空间中运行。Cilium 通过确保 eBPF 程序附加到正确的 cgroup 层次结构的 socket hooks 上,使 Cilium 基于套接字的负载均衡在 cgroup v2 环境中能正常工作。 + +## 增强负载均衡器的可扩展性 + +> 主要外部贡献者:Weilong Cui (Google) + +最近的测试表明,对于运行着 Cilium 且 Kubernetes Endpoints 超过 6.4 万的大型 Kubernetes 环境,Service 负载均衡器会受到限制。有两个限制因素: + +- Cilium 使用 eBPF 替代 kube-proxy 的独立负载均衡器的本地后端 ID 分配器仍被限制在 16 位 ID 空间内。 +- Cilium 用于 IPv4 和 IPv6 的 eBPF datapath 后端映射所使用的密钥类型被限制在 16 位 ID 空间内。 + +为了使 Kubernetes 集群能够扩展到超过 6.4 万 Endpoints,Cilium 的 ID 分配器以及相关的 datapath 结构已被转换为使用 32 位 ID 空间。 + +## Cilium Endpoint Slices + +> 主要外部贡献者:Weilong Cui (Google), Gobinath Krishnamoorthy (Google) + +在 1.11 版本中,Cilium 增加了对新操作模式的支持,该模式通过更有效的 Pod 信息广播方式大大提高了 Cilium 的扩展能力。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112111507157.png) + +之前,Cilium 通过 watch `CiliumEndpoint (CEP)` 对象来广播 Pod 的 IP 地址和安全身份信息,这种做法在可扩展性方面会带来一定的挑战。每个 `CEP` 对象的创建/更新/删除都会触发 watch 事件的组播,其规模与集群中 Cilium-agent 的数量呈线性关系,而每个 Cilium-agent 都可以触发这样的扇出动作。如果集群中有 `N` 个节点,总 watch 事件和流量可能会以 `N^2` 的速率二次扩展。 + +Cilium 1.11 引入了一个新的 CRD `CiliumEndpointSlice (CES)`,来自同一命名空间下的 `CEPs` 切片会被 Operator 组合成 `CES` 对象。在这种模式下,Cilium-agents 不再 watch `CEP`,而是 watch `CES`,大大减少了需要经由 `kube-apiserver` 广播的 watch 事件和流量,进而减轻 `kube-apiserver` 的压力,增强 Cilium 的可扩展性。 + +由于 `CES` 大大减轻了 `kube-apiserver` 的压力,Cilium 现在已经不再依赖专用的 ETCD 实例(KVStore 模式)。对于 Pod 数量剧烈变化的集群,我们仍然建议使用 KVStore,以便将 `kube-apiserver` 中的处理工作卸载到 ETCD 实例中。 + 
+这种模式权衡了“更快地传播 Endpoint 信息”和“扩展性更强的控制平面”这两个方面,并力求雨露均沾。注意,与 `CEP` 模式相比,在规模较大时,如果 Pod 数量剧烈变化(例如大规模扩缩容),可能会产生较高的 Endpoint 信息传播延迟,从而影响到远程节点。 + +GKE 最早采用了 `CES`,我们在 GKE 上进行了一系列“最坏情况”规模测试,发现 Cilium 在 `CES` 模式下的扩展性要比 `CEP` 模式强很多。从 1000 节点规模的负载测试来看,启用 `CES` 后,watch 事件的峰值从 CEP 的 `18k/s` 降低到 CES 的 `8k/s`,watch 流量峰值从 CEP 的 `36.6Mbps` 降低到 CES 的 `18.1Mbps`。在控制器节点资源使用方面,它将 CPU 的峰值使用量从 28 核/秒减少到 10.5 核/秒。 + +![GKE 中 1000 个节点规模的负载测试中的 CEP watch 事件和流量](https://pek3b.qingstor.com/kubesphere-community/images/202112111548337.png) + +![GKE 中 1000 个节点规模的负载测试中的 CEP 和 CES watch 事件和流量](https://pek3b.qingstor.com/kubesphere-community/images/202112111549767.png) + +详情请参考 [Cilium 官方文档](https://docs.cilium.io/en/v1.11/gettingstarted/ciliumendpointslice/)。 + +## Kube-Proxy-Replacement 支持 Istio + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112111554971.png) + +许多用户在 Kubernetes 中使用 eBPF 自带的负载均衡器来[替代 kube-proxy](https://docs.cilium.io/en/latest/gettingstarted/kubeproxy-free/),享受基于 eBPF 的 datapath 带来的[高效处理方式](https://docs.cilium.io/en/v1.10/operations/performance/tuning/#tuning-guide),避免了 kube-proxy 随着集群规模线性增长的 iptables 规则链条。 + +eBPF 对 Kubernetes Service 负载均衡的处理在架构上分为[两个部分](https://cilium.io/blog/2020/06/22/cilium-18#kubeproxy-removal): + +- 处理进入集群的外部服务流量(南北方向) +- 处理来自集群内部的服务流量(东西方向) + +在 eBPF 的加持下,Cilium 在南北方向已经实现了尽可能靠近驱动层(例如通过 XDP)完成对每个数据包的处理;东西流量的处理则尽可能靠近 eBPF 应用层,处理方式是将应用程序的请求(例如 TCP `connect(2)`)从 Service 虚拟 IP 直接“连接”到后端 IP 之一,以避免每个数据包的 NAT 转换成本。 + +Cilium 的这种处理方式适用于大多数场景,但还是有一些例外,比如常见的服务网格解决方案(Istio 等)。Istio 依赖 iptables 在 Pod 的网络命名空间中插入额外的重定向规则,以便应用流量在离开 Pod 进入主机命名空间之前首先到达代理 Sidecar(例如 Envoy),然后通过 `SO_ORIGINAL_DST` 从其内部 socket 直接[查询](https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/listener_filters/original_dst_filter) Netfilter 连接跟踪器,以收集原始服务目的地址。 + +所以在 Istio 等服务网格场景下,Cilium 改进了对 Pod 之间(东西方向)流量的处理方式,改成基于 eBPF 的 DNAT 完成对每个数据包的处理,而主机命名空间内的应用仍然可以使用基于 socket 的负载均衡器,以避免每个数据包的 NAT 转换成本。 + +要想开启这个特性,只需在新版 Cilium agent 的 Helm Chart 中设置 `bpf-lb-sock-hostns-only: 
true` 即可。详细步骤请参考 [Cilium 官方文档](https://docs.cilium.io/en/latest/gettingstarted/kubeproxy-free/#socket-loadbalancer-bypass-in-pod-namespace)。 + +## 特性增强与弃用 + +以下特性得到了进一步增强: + +- **主机防火墙** (Host Firewall) 从测试版 (beta) 转为稳定版 (stable)。主机防火墙通过允许 [CiliumClusterwideNetworkPolicies](https://CiliumClusterwideNetworkPolicies) 选择集群节点来保护主机网络命名空间。自从引入主机防火墙功能以来,我们已经大大增加了测试覆盖率,并修复了部分错误。我们还收到了部分社区用户的反馈,他们对这个功能很满意,并准备用于生产环境。 + +以下特性已经被弃用: + +- **Consul** 之前可以作为 Cilium 的 KVStore 后端,现已被弃用,推荐使用更经得起考验的 Etcd 和 Kubernetes 作为 KVStore 后端。之前 Cilium 的开发者主要使用 Consul 进行本地端到端测试,但在最近的开发周期中,已经可以直接使用 Kubernetes 作为后端来测试了,Consul 可以退休了。 +- **IPVLAN** 之前用来作为 veth 的替代方案,用于提供跨节点 Pod 网络通信。在 Cilium 社区的推动下,大大改进了 Linux 内核的性能,目前 veth 已经和 IPVLAN 性能相当。具体可参考这篇文章:[eBPF 主机路由](https://cilium.io/blog/2020/11/10/cilium-19#veth)。 +- **策略追踪 (Policy Tracing)** 在早期 Cilium 版本中被很多 Cilium 用户使用,可以通过 Pod 中的命令行工具 `cilium policy trace` 来执行。然而随着时间的推移,它没有跟上 Cilium 策略引擎的功能进展。Cilium 现在提供了更好的工具来追踪 Cilium 的策略,例如[网络策略编辑器](https://app.networkpolicy.io/)和 [Policy Verdicts](https://cilium.io/blog/2020/06/22/cilium-18#policyverdicts)。 \ No newline at end of file diff --git a/content/zh/blogs/create-pipeline-across-multi-clusters.md b/content/zh/blogs/create-pipeline-across-multi-clusters.md index 2de385f25..834253eaf 100644 --- a/content/zh/blogs/create-pipeline-across-multi-clusters.md +++ b/content/zh/blogs/create-pipeline-across-multi-clusters.md @@ -121,7 +121,7 @@ pipeline { REGISTRY = 'docker.io' DOCKERHUB_NAMESPACE = 'shaowenchen' - APP_NAME = 'devops-java-sample' + APP_NAME = 'devops-maven-sample' SONAR_CREDENTIAL_ID = 'sonar-token' TAG_NAME = "SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER" } @@ -129,16 +129,15 @@ pipeline { stage('checkout') { steps { container('maven') { - git branch: 'master', url: 'https://github.com/kubesphere/devops-java-sample.git' + git branch: 'master', url: 'https://github.com/kubesphere/devops-maven-sample.git' } } } stage('unit test') { steps { container('maven') { - sh 'mvn clean -o -gs 
`pwd`/configuration/settings.xml test' + sh 'mvn clean test' } - } } stage('sonarqube analysis') { @@ -146,7 +145,7 @@ pipeline { container('maven') { withCredentials([string(credentialsId: "$SONAR_CREDENTIAL_ID", variable: 'SONAR_TOKEN')]) { withSonarQubeEnv('sonar') { - sh "mvn sonar:sonar -o -gs `pwd`/configuration/settings.xml -Dsonar.login=$SONAR_TOKEN" + sh "mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN" } } @@ -157,7 +156,7 @@ pipeline { stage('build & push') { steps { container('maven') { - sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package' + sh 'mvn -Dmaven.test.skip=true clean package' sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' @@ -179,19 +178,37 @@ pipeline { } stage('deploy to dev') { steps { - kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$DEV_KUBECONFIG_CREDENTIAL_ID") + withCredentials([ + kubeconfigFile( + credentialsId: env.DEV_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/dev-all-in-one/devops-sample.yaml | kubectl apply -f -' + } } } stage('deploy to staging') { steps { input(id: 'deploy-to-staging', message: 'deploy to staging?') - kubernetesDeploy(configs: 'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$TEST_KUBECONFIG_CREDENTIAL_ID") + withCredentials([ + kubeconfigFile( + credentialsId: env.TEST_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } } } stage('deploy to production') { steps { input(id: 'deploy-to-production', message: 'deploy to production?') - kubernetesDeploy(configs: 
'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$PROD_KUBECONFIG_CREDENTIAL_ID") + withCredentials([ + kubeconfigFile( + credentialsId: env.PROD_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } } } } diff --git a/content/zh/blogs/debugging-envoy-and-istio.md b/content/zh/blogs/debugging-envoy-and-istio.md new file mode 100644 index 000000000..a253a44e3 --- /dev/null +++ b/content/zh/blogs/debugging-envoy-and-istio.md @@ -0,0 +1,741 @@ +--- +title: 'Istio 无法访问外部服务的故障排查' +tag: 'Kubernetes,Istio,Envoy' +keywords: 'Kubernetes, Istio, Envoy, Sidecar, istio-proxy' +description: '本文以一次生产事故为例,详细分析了 Istio 无法访问外部服务的原因,并给出了多种可选的解决方案。' +createTime: '2021-09-17' +author: '龙小虾' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/istio-debug.png' +--- + +## 事故起因 + +业务上新集群,本来以为"洒洒水",11 点切,12 点就能在家睡觉了。流量切过来后,在验证过程中,发现网页能够正常打开,在登录时返回了 502,当场懵逼。在相关的容器日志发现一个高频的报错条目“7000 端口无法连接”,向业务组了解到这是 redis 集群中的一个端口,前后端是通过 redis 交互的,该集群同时还有 7001-7003 其它三个端口。 + +用 nc 命令对 redis 集群进行连接测试:向服务端发送 `keys *` 命令时,7000 端口返回的是 `HTTP/1.1 400 Bad Request`,其他三个端口是 redis 返回的 `-NOAUTH Authentication required`。 + +```bash +$ nc 10.0.0.6 7000 +keys * +HTTP/1.1 400 Bad Request +content-length: 0 +connection: close + +$ nc 10.0.0.6 7003 +keys * +-NOAUTH Authentication required +``` + +判断 7000 端口连接到了其他应用上,至少不是 redis。在宿主机上抓包发现没有抓到访问 7000 端口的流量,然后查看容器的 nf_conntrack 表,发现 7000 端口的数据只有到本地的会话信息;7003 的有两条会话信息,一条到本机的,一条到目标服务器的。 + +```bash +$ grep 7000 /proc/net/nf_conntrack +ipv4 2 tcp 6 110 TIME_WAIT src=10.64.192.14 dst=10.0.0.6 sport=50498 dport=7000 src=127.0.0.1 dst=10.64.192.14 sport=15001 dport=50498 [ASSURED] mark=0 zone=0 use=2 + +$ grep 7003 /proc/net/nf_conntrack +ipv4 2 tcp 6 104 TIME_WAIT src=10.64.192.14 dst=10.0.0.6 sport=38952 dport=7003 src=127.0.0.1 dst=10.64.192.14 sport=15001 dport=38952 [ASSURED] mark=0 zone=0 use=2 +ipv4 2 tcp 6 104 TIME_WAIT src=10.64.192.14 dst=10.0.0.6 
sport=38954 dport=7003 src=10.0.0.6 dst=10.64.192.14 sport=7003 dport=38954 [ASSURED] mark=0 zone=0 use=2 +``` + +由此判断出 istio 没有代理转发出 7000 的流量,这突然就触及到了我的知识盲区,一大堆人看着,办公室 26 度的空调,一直在冒汗。没办法了,在与业务商量后,只能先关闭 istio 注入,优先恢复了业务。回去后恶补 istio 的相关资料。终于将问题解决。记录下相关信息,以供日后参考。 + +## 背景知识补充 + +### istio Sidecar 的模式 + +istio 的 Sidecar 有两种模式: + +- **ALLOW_ANY**:istio 代理允许调用未知的服务,黑名单模式。 +- **REGISTRY_ONLY**:istio 代理会阻止任何没有在网格中定义的 HTTP 服务或 service entry 的主机,白名单模式。 + +### istio-proxy(Envoy)的配置结构 + +istio-proxy(Envoy)的代理信息大体由以下几个部分组成: + +- **Cluster**:在 Envoy 中,Cluster 是一个服务集群,Cluster 中包含一个到多个 endpoint,每个 endpoint 都可以提供服务,Envoy 根据负载均衡算法将请求发送到这些 endpoint 中。cluster 分为 inbound 和 outbound 两种,前者对应 Envoy 所在节点上的服务;后者占了绝大多数,对应 Envoy 所在节点的外部服务。可以使用如下方式分别查看 inbound 和 outbound 的 cluster。 +- **Listeners**:Envoy 采用 listener 来接收并处理 downstream 发过来的请求,可以直接与 Cluster 关联,也可以通过 rds 配置路由规则(Routes),然后在路由规则中再根据不同的请求目的地对请求进行精细化的处理。 +- **Routes**:配置 Envoy 的路由规则。istio 下发的缺省路由规则中对每个端口(服务)设置了一个路由规则,根据 host 来对请求进行路由分发,routes 的目的为其他服务的 cluster。 +- **Endpoint**:cluster 对应的后端服务,可以通过 istio pc endpoint 查看 inbound 和 outbound 对应的 endpoint 信息。 + +### 服务发现类型 + +cluster 的服务发现类型主要有: + +- **ORIGINAL_DST**:类型的 Cluster,Envoy 在转发请求时会直接采用 downstream 请求中的原始目的地 IP 地址 +- **EDS**:EDS 获取到该 Cluster 中所有可用的 Endpoint,并根据负载均衡算法(缺省为 Round Robin)将 Downstream 发来的请求发送到不同的 Endpoint。**istio 会自动为集群中的 service 创建代理信息,listener 的信息从 service 获取,对应的 cluster 被标记为 EDS 类型** +- **STATIC**:缺省值,在集群中列出所有可代理的主机 Endpoints。当内容为空时,不进行转发。 +- **LOGICAL_DNS**:Envoy 使用 DNS 添加主机,但如果 DNS 不再返回时,也不会丢弃。 +- **STRICT_DNS**:Envoy 将监控 DNS,而每个匹配的 A 记录都将被认为是有效的。 + +### 两个特殊集群 + +**BlackHoleCluster**:黑洞集群,匹配此集群的流量将不会被转发。 + +```json +{ + "name": "BlackHoleCluster", + "type": "STATIC", + "connectTimeout": "10s" +} +``` + +类型为 static,但是没有指定可代理的 Endpoint,所以流量不会被转发。 + +**PassthroughCluster**:透传集群,匹配此集群的流量数据包的目的 IP 不会改变。 + +```json +{ + "name": "PassthroughCluster", + "type": "ORIGINAL_DST", + "connectTimeout": "10s", + "lbPolicy": "CLUSTER_PROVIDED", + "circuitBreakers": { + 
"thresholds": [ + { + "maxConnections": 4294967295, + "maxPendingRequests": 4294967295, + "maxRequests": 4294967295, + "maxRetries": 4294967295 + } + ] + } +``` + +类型为 original_dst,流量将原样转发。 + +### 一个特殊的 Listener + +istio 中有一个特殊的 Listener 叫 **virtualOutbound**,定义如下: + +- **virtualOutbound**:每个 Sidecar 都有一个绑定到 0.0.0.0:15001 的 listener,该 listener 下关联了许多 virtual listener。iptables 会先将所有出站流量导入该 listener,该 listener 有一个字段 useOriginalDst 设置为 true,表示会使用最佳匹配原始目的地的方式将请求分发到 virtual listener,如果没有找到任何 virtual listener,将会直接发送到数据包原目的地的 PassthroughCluster。 + +useOriginalDst 字段的具体意义是,如果使用 iptables 重定向连接,则代理接收流量的目标地址可能与原始目标地址不同。当此标志设置为 **true** 时,侦听器会将重定向流量**转交给与原始目标地址关联的侦听器**。如果没有与原始目标地址关联的侦听器,则流量由接收它的侦听器处理。默认为 false。 + +virtualOutbound 的流量处理流程如图所示: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171718702.png) + +这是 virtualOutbound 的部分配置: + +```json +{ + "name": "envoy.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy", + "statPrefix": "PassthroughCluster", + "cluster": "PassthroughCluster" + } +} +…………… +"useOriginalDst": true +``` + +## istio 的 outbond 流量处理 + +开启流量治理后,pod 访问外部资源的流量转发路径如图所示: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171719319.png) + +istio 注入后 istio-proxy 有一个监听在 15001 的端口,所有非 istio-proxy 用户进程产生的 outbond 流量,通过 iptables 规则被重定向到 15001。 + +```bash +# Sidecar 注入的 pod 监听的端口 +$ ss -tulnp +State Recv-Q Send-Q Local Address:Port Peer Address:Port +LISTEN 0 128 *:80 *:* +LISTEN 0 128 *:15090 *:* +LISTEN 0 128 127.0.0.1:15000 *:* +LISTEN 0 128 *:15001 *:* +LISTEN 0 128 *:15006 *:* +LISTEN 0 128 [::]:15020 [::]:* + +# Pod 内部的 iptables 表项 +$ iptables-save +# Generated by iptables-save v1.4.21 on Fri Sep 17 13:47:09 2021 +*nat +:PREROUTING ACCEPT [129886:7793160] +:INPUT ACCEPT [181806:10908360] +:OUTPUT ACCEPT [53409:3257359] +:POSTROUTING ACCEPT [53472:3261139] +:istio_INBOUND - [0:0] +:istio_IN_REDIRECT - [0:0] +:istio_OUTPUT - [0:0] +:istio_REDIRECT - [0:0] +-A 
PREROUTING -p tcp -j istio_INBOUND +-A OUTPUT -p tcp -j istio_OUTPUT +-A istio_INBOUND -p tcp -m tcp --dport 22 -j RETURN +-A istio_INBOUND -p tcp -m tcp --dport 15020 -j RETURN +-A istio_INBOUND -p tcp -j istio_IN_REDIRECT +-A istio_IN_REDIRECT -p tcp -j REDIRECT --to-ports 15006 +-A istio_OUTPUT -s 127.0.0.6/32 -o lo -j RETURN +-A istio_OUTPUT ! -d 127.0.0.1/32 -o lo -j istio_IN_REDIRECT +-A istio_OUTPUT -m owner --uid-owner 1337 -j RETURN +-A istio_OUTPUT -m owner --gid-owner 1337 -j RETURN +-A istio_OUTPUT -d 127.0.0.1/32 -j RETURN +-A istio_OUTPUT -j istio_REDIRECT +-A istio_REDIRECT -p tcp -j REDIRECT --to-ports 15001 +COMMIT +# Completed on Fri Sep 17 13:47:09 2021 +``` + +istio-proxy 收到流量后,大致的处理步骤如下: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171719244.png) + +- Proxy 在 ALLOW_ANY 模式下没有匹配上 listener 将被直接转发 +- listener 关联了 type 为 ORIGINAL_DST 的 cluster 将使用原始请求中的 IP 地址 +- 匹配上了 BlackHoleCluster,将不会被转发 + +被代理流量的匹配步骤大致如下: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171719669.png) + +> 疑问:istio 为 svc 创建的 listener 地址是全零的,集群内部的端口是会存在复用的,那 istio 到底是怎么区分流量的呢? 
+ +关键就在于 route,route 由 virtual_host 条目组成,这些 virtual_host 条目就是根据 svc 的信息生成的,访问集群内部的 svc 时,在 route 里可以根据域名或者 svc 对应的 virtual_ip 进行精确匹配,所以完全不需要担心啦。 + +```bash +$ kubectl get svc -A | grep 8001 +NodePort 10.233.34.158 8001:30333/TCP 8d +NodePort 10.233.9.105 8001:31717/TCP 8d +NodePort 10.233.60.59 8001:31135/TCP 2d16h +NodePort 10.233.18.212 8001:32407/TCP 8d +NodePort 10.233.15.5 8001:30079/TCP 8d +NodePort 10.233.59.21 8001:31103/TCP 8d +NodePort 10.233.17.123 8001:31786/TCP 8d +NodePort 10.233.9.196 8001:32662/TCP 8d +NodePort 10.233.62.85 8001:32104/TCP 8d +ClusterIP 10.233.49.245 8000/TCP,8001/TCP,8443/TCP,8444/TCP +``` + +这是 route 下的 virtual_host 条目: + +```json + { + "name": "8001", + "virtualHosts": [ + { + "name": "merchant-center.open.svc.cluster.local:8001", + "domains": [ + "merchant-center.open.svc.cluster.local", + "merchant-center.open.svc.cluster.local:8001", + "merchant-center.open", + "merchant-center.open:8001", + "merchant-center.open.svc.cluster", + "merchant-center.open.svc.cluster:8001", + "merchant-center.open.svc", + "merchant-center.open.svc:8001", + "10.233.60.59", + "10.233.60.59:8001" + ], + "routes": [ + { + "name": "default", + "match": { + "prefix": "/" + }, + "route": { + "cluster": "outbound|8001||merchant-center.open.svc.cluster.local", + "timeout": "0s", + "retryPolicy": { + "retryOn": "connect-failure,refused-stream,unavailable,cancelled,resource-exhausted,retriable-status-codes", + "numRetries": 2, + "retryHostPredicate": [ + { + "name": "envoy.retry_host_predicates.previous_hosts" + } + ], + "hostSelectionRetryMaxAttempts": "5", + "retriableStatusCodes": [ + 503 + ] + }, + "maxGrpcTimeout": "0s" + }, +………………… +{ + "name": "cashier-busi-svc.pay.svc.cluster.local:8001", + "domains": [ + "cashier-busi-svc.pay.svc.cluster.local", + "cashier-busi-svc.pay.svc.cluster.local:8001", + "cashier-busi-svc.pay", + "cashier-busi-svc.pay:8001", + "cashier-busi-svc.pay.svc.cluster", + "cashier-busi-svc.pay.svc.cluster:8001", + 
"cashier-busi-svc.pay.svc", + "cashier-busi-svc.pay.svc:8001", + "10.233.17.123", + "10.233.17.123:8001" + ], +………………… + { + "name": "center-job.manager.svc.cluster.local:8001", + "domains": [ + "center-job.manager.svc.cluster.local", + "center-job.manager.svc.cluster.local:8001", + "center-job.manager", + "center-job.manager:8001", + "center-job.manager.svc.cluster", + "center-job.manager.svc.cluster:8001", + "center-job.manager.svc", + "center-job.manager.svc:8001", + "10.233.34.158", + "10.233.34.158:8001" + ], +…………… +``` + +## 问题分析 + +基于以上信息,对集群内的 svc 进行端口过滤,终于发现了集群中存在使用了 7000 端口的 service: + +![使用7000端口的svc](https://pek3b.qingstor.com/kubesphere-community/images/202109171402399.png) + +istio 会为 10.233.0.115:7000 自动生成一个 0.0.0.0:7000 的 listener: + +```bash +ADDRESS PORT TYPE +0.0.0.0 7000 TCP +``` + +查看详细配置信息,在该 listener 中对于 tcp 流量是不转发(BlackHoleCluster),所以目标地址为 10.0.x.x:7000 的流量被 listener_0.0.0.0:7000 匹配到时,因为是 tcp 的流量(nc 命令默认 tcp 协议),所以代理没有对该流量进行转发。这与开头提到的 pod 没有流量发出来现象一致。 + +```json +{ + "name": "0.0.0.0_7000", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 7000 + } + }, + "filterChains": [ + { + "filterChainMatch": { + "prefixRanges": [ + { + "addressPrefix": "10.64.x.x", + "prefixLen": 32 + } + ] + }, + "filters": [ + { + "name": "envoy.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy", + "statPrefix": "BlackHoleCluster", + "cluster": "BlackHoleCluster" + } + } + ] +} +``` + +至于 7001-7003 为什么能通,是因为 istio-proxy 默认使用的是 ALLOW_ANY 模式,对于没有匹配上 listener 的流量是直接放行。可以通过 istio_configmap 配置信息来验证一下: + +```bash +$ kubectl get cm istio -n istio-system -o yaml | grep -i -w -a3 "mode" + # REGISTRY_ONLY - restrict outbound traffic to services defined in the service registry as well + # as those defined through ServiceEntries + outboundTrafficPolicy: + mode: ALLOW_ANY + localityLbSetting: + enabled: true + # The namespace to treat as the administrative root namespace for istio +-- + 
drainDuration: 45s + parentShutdownDuration: 1m0s + # + # The mode used to redirect inbound connections to Envoy. This setting + # has no effect on outbound traffic: iptables REDIRECT is always used for + # outbound connections. + # If "REDIRECT", use iptables REDIRECT to NAT and redirect to Envoy. + # The "REDIRECT" mode loses source addresses during redirection. + # If "TPROXY", use iptables TPROXY to redirect to Envoy. + # The "TPROXY" mode preserves both the source and destination IP + # addresses and ports, so that they can be used for advanced filtering + # and manipulation. + # The "TPROXY" mode also configures the Sidecar to run with the + # CAP_NET_ADMIN capability, which is required to use TPROXY. + #interceptionMode: REDIRECT + # +``` + +## 解决方案 + +最后我们来解决开头提到的问题,总共有三种解决方案。 + +### 方法 1:Service Entry + +服务条目(Service Entry)是 istio 重要的资源对象之一,作用是将外部的资源注册到 istio 内部的网格服务中来,以提供网格内对外部资源的更加精细化的控制。我们可以简单理解为白名单,istios 根据 Service Entry 的内容生成 listeners。 + +我们在命名空间 dev-self-pc-ct 中添加如下配置: + +```yaml +$ kubectl apply -f - < +Port: tcp-7001 7001/TCP +TargetPort: 7001/TCP +Endpoints: +Port: tcp-7002 7002/TCP +TargetPort: 7002/TCP +Endpoints: +Port: tcp-7003 7003/TCP +TargetPort: 7003/TCP +Endpoints: +Session Affinity: None +Events: +``` + +Listener 部分信息如下: + +```json +{ + "name": "10.233.59.159_7000", + "address": { + "socketAddress": { + "address": "10.233.59.159", + "portValue": 7000 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "mixer", + "typedConfig": { + "@type": "type.googleapis.com/istio.mixer.v1.config.client.TcpClientConfig", + "transport": { + "networkFailPolicy": { + "policy": "FAIL_CLOSE", + "baseRetryWait": "0.080s", + "maxRetryWait": "1s" + }, + "checkCluster": "outbound|9091||istio-policy.istio-system.svc.cluster.local", + "reportCluster": "outbound|9091||istio-telemetry.istio-system.svc.cluster.local", + "reportBatchMaxEntries": 100, + "reportBatchMaxTime": "1s" + }, + "mixerAttributes": { + "attributes": { + "context.proxy_version": { + 
"stringValue": "1.4.8" + }, +...... +``` + +该 listener 指向了一个 cluster: + +```json +{ + "name": "envoy.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy", + "statPrefix": "outbound|7000||redis", + "cluster": "outbound|7000||redis" + } +} +``` + +对应的 service 信息如下: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171431327.png) + +可以看到 endpoint 就是刚才我们指定的外部服务器地址: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171431448.png) + +进行访问测试: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202109171432447.png) + +已经可以正常访问了。 + +## 总结 + +最后我们来比较一下这三种方法。 + +- **方法 1**:通过添加 ServiceEntry,以允许访问外部服务。可以让你使用 istio 服务网格所有的功能去调用集群内或集群外的服务,这是官方推荐的方法。 +- **方法 2**:直接绕过了 istio Sidecar 代理,使你的服务可以直接访问任意的外部服务。 但是,以这种方式配置代理需要了解集群提供商相关知识和配置。将失去对外部服务访问的监控,并且无法将 istio 功能应用于外部服务的流量。 +- **方法 3**:这个方法相对于其他两种,配置有点复杂,同时还要通过 service 的方式来访问外部服务,这意味着对于已经存在的应用需要进行改造。具体能否实施看实际情况。 + +方法 1 的做法类似于“白名单”,不但能达到访问外部服务的目的,并且可以像集群内部服务一样对待(可使用 istio 的流量控制功能)。另外,即使服务受到入侵,由于“白名单”的设置入侵者也无法(或较难)将流量回传到入侵机器,进一步保证了服务的安全性; + +方法 2 直接绕过了 istio Sidecar 代理,使你的服务可以直接访问任意的外部服务。 但是,以这种方式配置代理需要了解集群提供商相关知识和配置。 你将失去对外部服务访问的监控,并且无法将 istio 功能应用于外部服务的流量; + +方法 3 虽然也可以使用 istio 的流量控制功能来管理外部流量,但是在实际操作中会存在配置复杂、改造应用等问题 + +因此,强烈推荐大家使用方法一。最后,特别提醒一下大家。**将 `includeOutboundIPRanges` 设置为空**是有问题的,这**相当于将所有的服务都配置代理绕行**,那 Sidecar 就没起作用了,没了 Sidecar 的 istio 就没有灵魂了。。 \ No newline at end of file diff --git a/content/zh/blogs/deep-dive-into-the-K8s-request-and-limit.md b/content/zh/blogs/deep-dive-into-the-K8s-request-and-limit.md index 7c8261951..384e3eed3 100644 --- a/content/zh/blogs/deep-dive-into-the-K8s-request-and-limit.md +++ b/content/zh/blogs/deep-dive-into-the-K8s-request-and-limit.md @@ -1,6 +1,7 @@ --- title: '你真的理解 K8s 中的 requests 和 limits 吗?' 
-tag: 'Kubernetes,KubeSphere,schedule,monitoring' +tag: 'Kubernetes,KubeSphere' +keyword: 'Kubernetes,KubeSphere,requests,limits' createTime: '2021-01-01' author: '饶云坤' snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/cover.png' @@ -70,7 +71,7 @@ Kubernetes 创建 Pod 时就给它指定了下列一种 QoS 类:Guaranteed,B ### 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 ( ws-admin ),务必邀请该帐户到项目中并赋予 admin 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 ( ws-admin ),务必邀请该用户到项目中并赋予 admin 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 ### 设置项目配额( Resource Quotas ) diff --git a/content/zh/blogs/dockershim-out-of-kubernetes.md b/content/zh/blogs/dockershim-out-of-kubernetes.md index d9d5b713b..4cf8bcaf3 100644 --- a/content/zh/blogs/dockershim-out-of-kubernetes.md +++ b/content/zh/blogs/dockershim-out-of-kubernetes.md @@ -91,6 +91,8 @@ EOF $ systemctl enable containerd && systemctl restart containerd ``` +> 如果`containerd config dump |grep sandbox_image`仍是显示`k8s.gcr.io/pause:xxx`,请将`version = 2`添加到`/etc/containerd/config.toml`开头并执行`systemctl restart containerd`。 + 4. 安装 crictl。 ```shell diff --git a/content/zh/blogs/ebpf-guide.md b/content/zh/blogs/ebpf-guide.md new file mode 100644 index 000000000..e71f49714 --- /dev/null +++ b/content/zh/blogs/ebpf-guide.md @@ -0,0 +1,142 @@ +--- +title: 'eBPF 概述,第 1 部分:介绍' +tag: 'eBPF' +keywords: 'eBPF' +description: '有兴趣了解更多关于 eBPF 技术的底层细节?那么请继续移步,我们将深入研究 eBPF 的底层细节,从其虚拟机机制和工具,到在远程资源受限的嵌入式设备上运行跟踪。' +createTime: '2021-10-14' +author: 'Adrian Ratiu' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/ebpf-guide-cover.png' +--- + +> 原文链接: https://www.collabora.com/news-and-blog/blog/2019/04/05/an-ebpf-overview-part-1-introduction/ + +> **作者:Adrian Ratiu**
    +> **译者:狄卫华**
    +> **注:本文已取得作者本人的翻译授权** + +## 1. 前言 + +**有兴趣了解更多关于 eBPF 技术的底层细节?那么请继续移步,我们将深入研究 eBPF 的底层细节,从其虚拟机机制和工具,到在远程资源受限的嵌入式设备上运行跟踪。** + +注意:本系列博客文章将集中在 eBPF 技术,因此对于我们来讲,文中 BPF 和 eBPF 等同,可相互使用。BPF 名字/缩写已经没有太大的意义,因为这个项目的发展远远超出了它最初的范围。BPF 和 eBPF 在该系列中会交替使用。 + +- [第 1 部分](https://www.collabora.com/news-and-blog/blog/2019/04/05/an-ebpf-overview-part-1-introduction/)和[第 2 部分](https://www.collabora.com/news-and-blog/blog/2019/04/15/an-ebpf-overview-part-2-machine-and-bytecode/) 为新人或那些希望通过深入了解 eBPF 技术栈的底层技术来进一步了解 eBPF 技术的人提供了深入介绍。 +- [第 3 部分](https://www.collabora.com/news-and-blog/blog/2019/04/26/an-ebpf-overview-part-3-walking-up-the-software-stack/)是对用户空间工具的概述,旨在提高生产力,建立在第 1 部分和第 2 部分中介绍的底层虚拟机机制之上。 +- [第 4 部分](https://www.collabora.com/news-and-blog/blog/2019/05/06/an-ebpf-overview-part-4-working-with-embedded-systems/)侧重于在资源有限的嵌入式系统上运行 eBPF 程序,在嵌入式系统中完整的工具链技术栈(BCC/LLVM/python 等)是不可行的。我们将使用占用资源较小的嵌入式工具在 32 位 ARM 上交叉编译和运行 eBPF 程序。只对该部分感兴趣的读者可选择跳过其他部分。 +- [第 5 部分](https://www.collabora.com/news-and-blog/blog/2019/05/14/an-ebpf-overview-part-5-tracing-user-processes/)是关于用户空间追踪。到目前为止,我们的努力都集中在内核追踪上,所以是时候我们关注一下用户进程了。 + +如有疑问时,可使用该流程图: + +![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-flowchart.png) + +## 2. eBPF 是什么? 
+ +eBPF 是一个基于寄存器的虚拟机,使用自定义的 64 位 RISC 指令集,能够在 Linux 内核内运行即时本地编译的 "BPF 程序",并能访问内核功能和内存的一个子集。这是一个完整的虚拟机实现,不要与基于内核的虚拟机(KVM)相混淆,后者是一个模块,目的是使 Linux 能够作为其他虚拟机的管理程序。eBPF 也是主线内核的一部分,所以它不像其他框架那样需要任何第三方模块([LTTng](https://lttng.org/docs/v2.10/#doc-lttng-modules) 或 [SystemTap](https://kernelnewbies.org/SystemTap)),而且几乎所有的 Linux 发行版都默认启用。熟悉 DTrace 的读者可能会发现 [DTrace/BPFtrace 对比](http://www.brendangregg.com/blog/2018-10-08/dtrace-for-linux-2018.html)非常有用。 + +在内核内运行一个完整的虚拟机主要是考虑便利和安全。虽然 eBPF 程序所做的操作都可以通过正常的内核模块来处理,但直接的内核编程是一件非常危险的事情 - 这可能会导致系统锁定、内存损坏和进程崩溃,从而导致安全漏洞和其他意外的效果,特别是在生产设备上(eBPF 经常被用来检查生产中的系统),所以通过一个安全的虚拟机运行本地 JIT 编译的快速内核代码对于安全监控和沙盒、网络过滤、程序跟踪、性能分析和调试都是非常有价值的。部分简单的样例可以在这篇优秀的 [eBPF 参考](http://www.brendangregg.com/ebpf.html)中找到。 + +基于设计,eBPF 虚拟机和其程序有意地设计为**不是**图灵完备的:即不允许有循环(正在进行的工作是支持有界循环【译者注:已经支持有界循环,\#pragma unroll 指令】),所以每个 eBPF 程序都需要保证完成而不会被挂起、所有的内存访问都是有界和类型检查的(包括寄存器,一个 MOV 指令可以改变一个寄存器的类型)、不能包含空解引用、一个程序必须最多拥有 BPF_MAXINSNS 指令(默认 4096)、"主"函数需要一个参数(context)等等。当 eBPF 程序被加载到内核中,其指令被验证模块解析为有向环状图,上述的限制使得正确性可以得到简单而快速的验证。 + +> 译者注: BPF_MAXINSNS 这个限制已经被放宽至 100 万条指令(BPF_COMPLEXITY_LIMIT_INSNS),但是非特权执行的 BPF 程序这个限制仍然会保留。 + +历史上,eBPF (cBPF) 虚拟机只在内核中可用,用于过滤网络数据包,与用户空间程序没有交互,因此被称为 "伯克利数据包过滤器"【译者注:早期的 BPF 实现被称为经典 cBPF】。从内核 v3.18(2014 年)开始,该虚拟机也通过 [bpf() syscall](https://github.com/torvalds/linux/blob/v4.20/tools/lib/bpf) 和[uapi/linux/bpf.h](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h) 暴露在用户空间,这导致其指令集在当时被冻结,成为公共 ABI,尽管后来仍然可以(并且已经)添加新指令。 + +因为内核内的 eBPF 实现是根据 GPLv2 授权的,它不能轻易地被非 GPL 用户重新分发,所以也有一个替代的 Apache 授权的用户空间 eBPF 虚拟机实现,称为 "uBPF"。撇开法律条文不谈,基于用户空间的实现对于追踪那些需要避免内核-用户空间上下文切换成本的性能关键型应用很有用。 + + + +## 3. eBPF 是怎么工作的? 
+ +eBPF 程序在事件触发时由内核运行,所以可以被看作是一种函数挂钩或事件驱动的编程形式。从用户空间运行按需 eBPF 程序的价值较小,因为所有的按需用户调用已经通过正常的非 VM 内核 API 调用("syscalls")来处理,这里 VM 字节码带来的价值很小。事件可由 kprobes/uprobes、tracepoints、dtrace probes、socket 等产生。这允许在内核和用户进程的指令中钩住(hook)和检查任何函数的内存、拦截文件操作、检查特定的网络数据包等等。一个比较好的参考是 [Linux 内核版本对应的 BPF 功能](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)。 + +如前所述,事件触发了附加的 eBPF 程序的执行,后续可以将信息保存至 map 和环形缓冲区(ringbuffer)或调用一些特定 API 定义的内核函数的子集。一个 eBPF 程序可以链接到多个事件,不同的 eBPF 程序也可以访问相同的 map 以共享数据。一个被称为 "program array" 的特殊读/写 map 存储了对通过 bpf() 系统调用加载的其他 eBPF 程序的引用,在该 map 中成功的查找则会触发一个跳转,而且并不返回到原来的 eBPF 程序。这种 eBPF 嵌套也有限制,以避免无限的递归循环。 + +运行 eBPF 程序的步骤: + +1. 用户空间将字节码和程序类型一起发送到内核,程序类型决定了可以访问的内核区域【译者注:主要是 BPF 帮助函数的各种子集】。 +2. 内核在字节码上运行验证器,以确保程序可以安全运行(kernel/bpf/verifier.c)。 +3. 内核将字节码编译为本地代码,并将其插入(或附加到)指定的代码位置。【译者注:如果启用了 JIT 功能,字节码编译为本地代码】。 +4. 插入的代码将数据写入环形缓冲区或通用键值 map。 +5. 用户空间从共享 map 或环形缓冲区中读取结果值。 + +map 和环形缓冲区结构是由内核管理的(就像管道和 FIFO 一样),独立于挂载的 eBPF 或访问它们的用户程序。对 map 和环形缓冲区结构的访问是异步的,通过文件描述符和引用计数实现,可确保只要有至少一个程序还在访问,结构就能够存在。加载的 JIT 后代码通常在加载其的用户进程终止时被删除,尽管在某些情况下,它仍然可以在加载进程的生命期之后继续存在。 + +为了方便编写 eBPF 程序和避免进行原始的 bpf()系统调用,内核提供了方便的 [libbpf 库](https://github.com/torvalds/linux/blob/v4.20/tools/lib/bpf),包含系统调用函数包装器,如 [bpf_load_program](https://github.com/torvalds/linux/blob/v4.20/tools/lib/bpf/bpf.c#L214) 和结构定义(如 [bpf_map](https://github.com/torvalds/linux/blob/v4.20/tools/lib/bpf/libbpf.c#L157)),在 LGPL 2.1 和 BSD 2-Clause 下双重许可,可以静态链接或作为 DSO。内核代码也提供了一些使用 libbpf 简洁的例子,位于目录 [samples/bpf/](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/) 中。 + + + +## 4. 样例学习 + +内核开发者非常可怜,因为内核是一个独立的项目,因而没有用户空间诸如 Glibc、LLVM、JavaScript 和 WebAssembly 诸如此类的好东西! 
- 这就是为什么内核中 eBPF 例子中会包含原始字节码或通过 libbpf 加载预组装的字节码文件。我们可以在 [sock_example.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c) 中看到这一点,这是一个简单的用户空间程序,使用 eBPF 来计算环回接口上统计接收到 TCP、UDP 和 ICMP 协议包的数量。 + +我们跳过微不足道的的 [main](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L98) 和 [open_raw_sock](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.h#L13) 函数,而专注于神奇的代码 [test_sock](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L35)。 + +```c +static int test_sock(void) +{ + int sock = -1, map_fd, prog_fd, i, key; + long long value = 0, tcp_cnt, udp_cnt, icmp_cnt; + + map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value), 256, 0); + if (map_fd < 0) {printf("failed to create map'%s'\n", strerror(errno)); + goto cleanup; + } + + struct bpf_insn prog[] = {BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */ + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ + BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */ + BPF_EXIT_INSN(),}; + size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + + prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt, + "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE); + if (prog_fd < 0) {printf("failed to load prog'%s'\n", strerror(errno)); + goto cleanup; + } + + sock = open_raw_sock("lo"); + + if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd)) < 0) {printf("setsockopt %s\n", strerror(errno)); + goto cleanup; + } +``` + +首先,通过 libbpf API 创建一个 BPF 
map,该行为就像一个最大 256 个元素的固定大小的数组。按 [IPROTO_*](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/in.h#L28) 定义的键索引网络协议(2 字节的 word),值代表各自的数据包计数(4 字节大小)。除了数组,eBPF 映射还实现了[其他数据结构类型](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L113),如栈或队列。 + +接下来,eBPF 的字节码指令数组使用方便的[内核宏](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/bpf_insn.h)进行定义。在这里,我们不会讨论字节码的细节(这将在第 2 部分描述机器后进行)。更高的层次上,字节码从数据包缓冲区中读取协议字,在 map 中查找,并增加特定的数据包计数。 + +然后 BPF 字节码被加载到内核中,并通过 libbpf 的 bpf_load_program 返回 fd 引用来验证正确/安全。调用指定了 eBPF [程序类型](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L138),这决定了它可以访问哪些内核子集。因为样例是一个 SOCKET_FILTER 类型,因此提供了一个指向当前网络包的参数。最后,eBPF 的字节码通过套接字层被附加到一个特定的原始套接字上,之后在原始套接字上接受到的每一个数据包运行 eBPF 字节码,无论协议如何。 + +剩余的工作就是让用户进程开始轮询共享 map 的数据。 + +```c + for (i = 0; i < 10; i++) { + key = IPPROTO_TCP; + assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0); + + key = IPPROTO_UDP; + assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0); + + key = IPPROTO_ICMP; + assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0); + + printf("TCP %lld UDP %lld ICMP %lld packets\n", + tcp_cnt, udp_cnt, icmp_cnt); + sleep(1); + } +} +``` + + + +## 5. 
总结 + +第 1 部分介绍了 eBPF 的基础知识,我们通过如何加载字节码和与 eBPF 虚拟机通信的例子进行了讲述。由于篇幅限制,编译和运行例子作为留给读者的练习。我们也有意不去分析具体的 eBPF 字节码指令,因为这将是第 2 部分的重点。在我们研究的例子中,用户空间通过 libbpf 直接用 C 语言从内核虚拟机中读取 eBPF map 值(使用 10 次 1 秒的睡眠!),这很笨重,而且容易出错,而且很快就会变得很复杂,所以在第 3 部分,我们将研究更高级别的工具,通过脚本或特定领域的语言自动与虚拟机交互。 diff --git a/content/zh/blogs/ebpf-machine-bytecode.md b/content/zh/blogs/ebpf-machine-bytecode.md new file mode 100644 index 000000000..dc6548498 --- /dev/null +++ b/content/zh/blogs/ebpf-machine-bytecode.md @@ -0,0 +1,133 @@ +--- +title: 'eBPF 概述,第 2 部分:机器和字节码' +tag: 'eBPF' +keywords: 'eBPF' +description: '本系列的第二部分更深入地研究了第一部分中研究的 eBPF VM 和程序。' +createTime: '2021-10-21' +author: 'Adrian Ratiu' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/ebpf-guide-cover.png' +--- + +> 原文链接: https://www.collabora.com/news-and-blog/blog/2019/04/15/an-ebpf-overview-part-2-machine-and-bytecode/ + +> **作者:Adrian Ratiu**
    +> **译者:狄卫华**
    +> **注:本文已取得作者本人的翻译授权** + +在我们的[第一篇文章](https://kubesphere.com.cn/blogs/ebpf-guide/)中,我们介绍了 eBPF VM、它刻意的设计限制以及如何从用户空间进程与其交互。如果您还没有阅读它,您可能需要在继续阅读本篇文章之前阅读上一篇文章,因为如果没有适当了解,直接从机器和字节码细节开始学习可能会很困难。如有疑问,请参阅第一部分开头的流程图。 + +本系列的第二部分更深入地研究了第一部分中研究的 eBPF VM 和程序。掌握这种底层知识不是强制性的,但对于本系列的其余部分来说是非常有用的基础,我们将在其中检查建立在这些机制之上的更高级别的工具。 + +## 虚拟机 +eBPF 是一个 RISC 寄存器机,共有 [11 个 64 位寄存器](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L45),一个程序计数器和一个 512 字节固定大小的堆栈。九个寄存器是通用读写的,一个是只读堆栈指针,程序计数器是隐式的,即我们只能跳转到计数器的某个偏移量。VM 寄存器始终为 64 位宽(即使在 32 位 ARM 处理器内核中运行!)并且如果最高有效的 32 位为零,则支持 32 位子寄存器寻址 - 这将在第四部分在嵌入式设备上交叉编译和运行 eBPF 程序非常有用。 + +这些寄存器是: + +| | | +| -------- | -------- | +| r0: | 存储函数调用和当前程序退出代码的返回值 | +|r1 - r5: |作为函数调用的参数,在程序开始时 r1 包含 “上下文” 参数指针 | +|r6 - r9: |这些在内核函数调用之间被保留 | +|r10: | 每个 eBPF 程序512字节堆栈的只读指针 | + +在加载时提供的 eBPF [程序类型](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L136)准确地决定了哪些内核函数子集可以调用,以及在程序启动时通过 r1 提供的 “上下文” 参数。r0 中存储的程序退出值的含义也是由程序类型决定的。 + +每个函数调用在寄存器 r1 - r5 中最多可以有 5 个参数;这适用于 eBPF 到 eBPF 和内核函数的调用。寄存器 r1 - r5 只能存储数字或指向堆栈的指针(作为参数传递给函数),从不直接指向任意内存的指针。所有内存访问都必须先将数据加载到 eBPF 堆栈中,然后才能在 eBPF 程序中使用它。此限制有助于 eBPF 验证器,它简化了内存模型以实现更轻松的正确性检查。 + +BPF 可访问的内核“辅助”函数由内核核心(不可通过模块扩展)通过类似于定义系统调用的 API 定义,使用 [BPF_CALL_*](https://github.com/torvalds/linux/blob/v4.20/include/linux/filter.h#L441) 宏。[bpf.h](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L420) 试图为所有 BPF 可访问的内核“辅助”函数提供参考。例如 [bpf_trace_printk](https://github.com/torvalds/linux/blob/v4.20/kernel/trace/bpf_trace.c#L163) 的定义使用 BPF_CALL_5 和 5 对类型/参数名称。定义[参数数据类型](https://github.com/torvalds/linux/blob/v4.20/kernel/trace/bpf_trace.c#L276)很重要,因为在每个 eBPF 程序加载时,eBPF 验证器确保寄存器数据类型与被调用方参数类型匹配。 + +eBPF 指令也是固定大小的 64 位编码,大约 100 条指令(目前...)分为 [8 类](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf_common.h#L5)。VM 支持来自通用内存( map 、堆栈、“上下文”如数据包缓冲区等)的 1 - 8 字节加载/存储、向前/向后(非)条件跳转、算术/逻辑运算和函数调用。如需深入了解操作码格式,请参阅 Cilium 
项目[指令集文档](https://cilium.readthedocs.io/en/latest/bpf/#instruction-set)。IOVisor 项目还维护了一个有用的[指令规范](https://github.com/iovisor/bpf-docs/blob/master/eBPF.md)。 + +在本系列第一部分研究的示例中,我们使用了一些有用的[内核宏](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/bpf_insn.h)来使用以下[结构](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/bpf.h#L64)创建 eBPF 字节码指令数组(所有指令都以这种方式编码): + +```c +struct bpf_insn { + __u8 代码;/* opcode */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ + __s16 off; /* signed offset */ + __s32 imm; /* signed immediate constant */ +}; + +msb lsb ++------------------------+----------------+----+----+--------+ +|immediate |offset |src |dst |opcode | ++------------------------+----------------+----+----+--------+ +``` + +让我们看一下 [BPF_JMP_IMM](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/bpf_insn.h#L167) 指令,它针对立即值对条件跳转进行编码。下面的宏注释对指令逻辑应该是不言自明的。操作码编码指令类 BPF_JMP 、操作(通过 BPF_OP 位域传递以确保正确性)和表示它是对立即数/常量值 BPF_K 的操作的标志进行编码。 + +```c +#define BPF_OP(code) ((code) & 0xf0) +#define BPF_K 0x00 + +/* 针对立即数的条件跳转,if (dst_reg 'op' imm32) goto pc + off16 */ + +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0 + , \ + .off = OFF, \ .imm = IMM }) + +``` + +如果我们计算值或反汇编包含 BPF_JMP_IMM ( BPF_JEQ , BPF_REG_0 , 0 , 2 ) 的 eBPF 字节码二进制文件,我们会发现它是 0x020015 格式。这个特定的字节码非常频繁地用于测试存储在 r0 中的函数调用的返回值;如果 r0 == 0,它会跳过接下来的 2 条指令。 + +## 重温我们的字节码 +现在我们已经掌握了必要的知识来完全理解本系列第一部分中使用的字节码 eBPF 示例,我们将逐步解释它。请记住,[sock_example.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c) 是一个简单的用户态程序,它使用 eBPF 来计算在环回接口上接收到的 TCP、UDP 和 ICMP 协议数据包的数量。 + +在高层次上,代码所做的是从接收到的数据包中读取协议编号,然后将其推送到 eBPF 堆栈上,用作 map_lookup_elem 调用的索引,该调用获取相应协议的数据包计数。map_lookup_elem 函数采用 r0 中的索引(或键)指针和 r1 中的 map 文件描述符。如果查找调用成功,r0 将包含一个指向存储在协议索引处的 map 值的指针。然后我们原子地增加 map 值并退出。 + +`BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),` + +当 eBPF 程序启动时,上下文(在这种情况下是数据包缓冲区)由 r1 中的地址指向。r1 将在函数调用期间用作参数,因此我们也将其存储在 r6 中作为备份。 + 
+`BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */),` + +该指令将一个字节( BPF_B )从上下文缓冲区(在本例中为网络数据包缓冲区)中的偏移量加载到 r0 中,因此我们提供要加载到 r0 的 [iphdr 结构](https://github.com/torvalds/linux/blob/v4.20/include/uapi/linux/ip.h#L86)中的协议字节的偏移量。 + +`BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */` + +将包含先前读取的协议的字 ( BPF_W ) 压入堆栈(由 r10 指向,以偏移量 -4 字节开头)。 + +```c= +BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), +BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */ +``` + +将堆栈地址指针移至 r2 并减去 4,因此现在 r2 指向协议值,用作下一次 map 键查找的参数。 + +`BPF_LD_MAP_FD(BPF_REG_1, map_fd),` + +将本地进程内的文件描述符引用包含协议包计数的 map 到 r1 寄存器。 + +`BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),` + +使用 r2 指向的堆栈中的协议值作为键执行 map 查找调用。结果存储在 r0 中:指向由键索引的值的指针地址。 + +`BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),` + +还记得 0x020015 格式吗?这与第一部分的字节码相同。如果 map 查找没有成功,则 r0 == 0 所以我们跳过接下来的两条指令。 + +```c= +BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */ +BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */ +``` + +增加 r0 指向的地址处的 map 值。 + +```c= +BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */ +BPF_EXIT_INSN(), +``` + +将 eBPF retcode 设置为 0 并退出。 + +尽管这个 sock_example 逻辑非常简单(它只是在 map 中增加的一些数字),但在原始字节码中实现或理解是困难的。更复杂的任务在像这样的汇编程序中完成时变得极其困难。展望未来,我们将开始使用更高级的语言和工具,以更少的工作开启更强大的 eBPF 用例。 + +## 总结 +在这一部分中,我们仔细观察了 eBPF 虚拟机的寄存器和指令集,了解了 eBPF 可访问的内核函数是如何从字节码中调用的,以及它们是如何被核心内核通过类似 syscall 的特殊目的 API 定义的。我们也完全理解了第一部分例子中使用的字节码。还有一些未探索的领域,如创建多个 eBPF 程序函数或链式 eBPF 程序以绕过 Linux 发行版的 4096 条指令限制。也许我们会在以后的文章中探讨这些。 + +现在,主要的问题是编写原始字节码很困难的,这非常像编写汇编代码,而且编写效率低下。在第三部分中,我们将开始研究使用高级语言编译成 eBPF 字节码,到此为止我们已经了解了虚拟机工作的底层基础知识。 diff --git a/content/zh/blogs/ebpf-software-stack.md b/content/zh/blogs/ebpf-software-stack.md new file mode 100644 index 000000000..d4af5ebad --- /dev/null +++ b/content/zh/blogs/ebpf-software-stack.md @@ -0,0 +1,157 @@ +--- +title: 'eBPF 概述,第 3 部分:软件开发生态' +tag: 'eBPF' +keywords: 'eBPF' +description: '为了理解这些工具是如何工作的,我们先定义一下 eBPF 程序的高层次组件:后端、加载器、前端和数据结构等。开发方式包括 LLVM eBPF 
编译器/BCC/BPFTrace/IOVisor 等实现。' +createTime: '2021-11-05' +author: 'Adrian Ratiu' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/ebpf-guide-cover.png' +--- + +> 原文链接: https://www.collabora.com/news-and-blog/blog/2019/04/26/an-ebpf-overview-part-3-walking-up-the-software-stack/ + +> **作者:Adrian Ratiu**
    +> **译者:狄卫华**
    +> **注:本文已取得作者本人的翻译授权** + +## 1. 前言 + +在本系列的[第 1 部分](https://kubesphere.com.cn/blogs/ebpf-guide/)和[第 2 部分](https://kubesphere.com.cn/blogs/ebpf-machine-bytecode/)中,我们对 eBPF 虚拟机进行了简洁的深入研究。阅读上述部分并不是理解第 3 部分的必修课,尽管很好地掌握了低级别的基础知识确实有助于更好地理解高级别的工具。为了理解这些工具是如何工作的,我们先定义一下 eBPF 程序的高层次组件: + +- **后端**:这是在内核中加载和运行的 eBPF 字节码。它将数据写入内核 map 和环形缓冲区的**数据结构**中。 +- **加载器:**它将字节码**后端**加载到内核中。通常情况下,当加载器进程终止时,字节码会被内核自动卸载。 +- **前端:**从**数据结构**中读取数据(由**后端**写入)并将其显示给用户。 +- **数据结构**:这些是**后端**和**前端**之间的通信手段。它们是由内核管理的 map 和环形缓冲区,可以通过文件描述符访问,并需要在**后端**被加载之前创建。它们会持续存在,直到没有更多的**后端**或**前端**进行读写操作。 + +在第 1 部分和第 2 部分研究的 [sock_example.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c) 中,所有的组件都被放置在一个 C 文件中,所有的动作都由用户进程完成。 + +- [第 40-45 行](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L40-L45)创建 map**数据结构**。 +- [第 47-61 行](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L47-L61)定义**后端**。 +- [第 63-76 行](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L63-L76)在内核中**加载**后端 +- [第 78-91 行](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L78-L91)是**前端**,负责将从 map 文件描述符中读取的数据打印给用户。 + +eBPF 程序可以更加复杂:多个**后端**可以由一个(或单独的多个!)**加载器**进程加载,写入多个**数据结构**,然后由多个**前端**进程读取,所有这些都可以发生在一个跨越多个进程的用户 eBPF 应用程序中。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part3-Diagram1.png) + + + +## 2. 
层级 1:容易编写的后端:LLVM eBPF 编译器 + +我们在前面的文章中看到,在内核中编写原始的 eBPF 字节码是不仅困难而且低效,这非常像用处理器的汇编语言编写程序,所以很自然地开发了一个能够将 LLVM 中间表示编译成 eBPF 程序的模块,并从 2015 年的 v3.7 开始发布(GCC 到现在为止仍然不支持 eBPF)。这使得多种高级语言如 C、Go 或 Rust 的子集可以被编译到 eBPF。最成熟和最流行的是基于 C 语言编写的方式,因为内核也是用 C 写的,这样就更容易复用现有的内核头文件。 + +LLVM 将 "受限制的 C" 语言(记住,没有无界循环,最大 4096 条指令等等,见第 1 部分开始)编译成 ELF 对象文件,其中包含特殊区块(section),并可基于 bpf()系统调用,使用 libbpf 等库加载到内核中。这种设计有效地将**后端**定义从**加载器**和**前端**中分离出来,因为 eBPF 字节码包含在 ELF 文件中。 + +内核还在 [samples/bpf/](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/) 下提供了使用这种模式的例子:\*\_kern.c 文件被编译为 \*\_kern.o(**后端**代码),被 \*\_user.c(**装载器**和**前端**)加载。 + +将本系列第 1 和第 2 部分的 [sock_exapmle.c 原始字节码](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L47-L61) 转换为 "受限的 C" 代码“ [sockex1_kern.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c#L47-L61),这比原始字节码更容易理解和修改。 + +```c +#include +#include +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") my_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(long), + .max_entries = 256, +}; + +SEC("socket1") +int bpf_prog1(struct __sk_buff *skb) +{int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); + long *value; + + value = bpf_map_lookup_elem(&my_map, &index); + if (value) + __sync_fetch_and_add(value, skb->len); + + return 0; +} +char _license[] SEC("license") = "GPL"; +``` + +产生的 eBPF ELF 对象 sockex1_kern.o,包含了分离的**后端**和**数据结构**定义。**加载器**和**前端**[sockex1_user.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sockex1_user.c),用于解析 ELF 文件、[创建](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/bpf_load.c#L270)所需的 map 和[加载字节码](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/bpf_load.c#L630)中内核函数 bpf_prog1(),然后**前端**像以前一样[继续运行](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sockex1_user.c#L32-L48)。 + +引入这个 "受限的 C" 抽象层所做的权衡是使 eBPF**后端**代码更容易用高级语言编写,代价是增加**加载器**的复杂性(现在需要解析 ELF 对象),而**前端**大部分不受影响。 + + + +## 3. 
层级 2:自动化后端/加载器/前端的交互:BPF 编译器集合(BCC) + +并不是每个人手头都有内核源码,特别是在生产中,而且一般来说,将基于 eBPF 工具与特定的内核源码版本捆绑在一起并不是一个好主意。设计和实现 eBPF 程序的**后端**,**前端**,**加载器**和**数据结构**之间的相互作用可能是非常复杂,这也比较容易出错和耗时(特别是在 C 语言中),这被认为是一种危险的低级语言。除了这些风险之外,开发人员还经常为常见问题重新造轮子,会造成无尽的设计变化和实现。为了减轻这些痛苦,社区创建了 BCC 项目:其为编写、加载和运行 eBPF 程序提供了一个易于使用的框架,除了上面举例的 "限制性 C" 之外,还可以通过编写简单的 python 或 lua 脚本来实现。 + +BCC 项目有两个部分。 + +- 编译器集合(BCC 本身):这是用于编写 BCC 工具的框架,也是我们文章的重点。请继续阅读。 +- BCC-tools:这是一个不断增长的基于 eBPF 且经过测试的程序集,提供了使用的例子和手册。更多信息见[本教程](https://github.com/iovisor/bcc/blob/master/docs/tutorial.md)。 + +BCC 的安装包很大:它依赖于 LLVM/clang 将 "受限的 C"、python/lua 等编译成 eBPF,它还包含像 libbcc(用 C++ 编写)、libbpf 等库实现【译者注:原文 python/lua 顺序有错,另外 libcc 是 BCC 项目,libbpf 目前已经是内核代码一部分】。部分内核代码的也被复制到 BCC 代码中,所以它不需要基于完整的内核源(只需要头文件)进行构建。它可以很容易地占用数百 MB 的空间,这对于小型嵌入式设备来说不友好,我们希望这些设备也可以从 eBPF 的力量中受益。探索嵌入式设备由于大小限制问题的解决方案,将是我们在第 4 部分的重点。 + +eBPF 程序组件在 BCC 组织方式如下: + +- **后端**和**数据结构**:用 "限制性 C" 编写。可以在单独的文件中,或直接作为多行字符串存储在**加载器/前端**的脚本中,以方便使用。参见:[语言参考](https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md#bpf-c)。【译者注:在 BCC 实现中,后端代码采用面向对象的做法,真正生成字节码的时候,BCC 会进行一次预处理,转换成真正的 C 语言代码方式,这也包括 map 等数据结构的定义方面】。 + +- **加载器**和**前端**:可用非常简单的高级 python/lua 脚本编写。参见:[语言参考](https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md#bcc-python)。 + +因为 BCC 的主要目的是简化 eBPF 程序的编写,因此它尽可能地标准化和自动化:在后台完全自动化地通过 LLVM 编译 "受限的 C"**后端**,并产生一个标准的 ELF 对象格式类型,这种方式允许加载器对所有 BCC 程序只实现一次,并将其减少到最小的 API(2 行 python)。它还将**数据结构**的 API 标准化,以便于通过[**前端**](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c)访问。简而言之,它将开发者的注意力集中在编写**前端**上,而不必担心较低层次的细节问题。 + +为了最好地说明它是如何工作的,我们来看一个简单的具体例子,它是对前面文章中的 [sock_example.c](https://github.com/torvalds/linux/blob/v4.20/samples/bpf/sock_example.c) 的重新实现。该程序统计回环接口上收到了 TCP、UDP 和 ICMP 数据包的数量。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part3-Diagram2.jpeg) + +与此前直接用 C 语言编写的方式不同,用 BCC 实现具有以下优势: + +- 忘掉原始字节码:你可以用更方便的 "限制性 C" 编写所有**后端**。 +- 不需要维护任何 LLVM 的 "限制性 C" 构建逻辑。代码被 BCC 在脚本执行时直接编译和加载。 +- 没有危险的 C 代码:对于编写**前端**和**加载器**来说,Python 
是一种更安全的语言,不会出现像空解引用(null dereferences)的错误。 +- 代码更简洁,你可以专注于应用程序的逻辑,而不是具体的机器问题。 +- 脚本可以被复制并在任何地方运行(假设已经安装了 BCC),它不会被束缚在内核的源代码目录中。 +- 等等。 + +在上面的例子中,我们使用了 BPF.SOCKET_FILTER 程序类型,其结果是我们挂载的 C 函数得到一个网络数据包缓冲区作为 context 上下文参数【译者注:本例中为 struct \__sk_buff \*skb】。我们还可以使用 BPF.KPROBE 程序类型来探测任意的内核函数。我们继续优化,不再使用与上面相同的接口,而是使用一个特殊的 kprobe__* 函数名称前缀,以描述一个更高级别的 BCC API。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part3-Diagram3.jpeg) + +这个例子来自于 [bcc/examples/tracing/bitehist.py](https://github.com/iovisor/bcc/blob/v0.8.0/examples/tracing/bitehist.py)。它通过挂载在 blk_account_io_completion() 内核函数来打印一个 I/O 块大小的直方图。 + +请注意:eBPF 的加载是根据 **kprobe**__blk_account_io_completion() 函数的名称自动发生的(加载器隐含实现)! 【译者注:kprobe\_\_ 前缀会被 BCC 编译代码过程中自动识别并转换成对应的附加函数调用】从用 libbpf 在 C 语言中编写和加载字节码以来,我们已经走了很远。 + + + +## 4. 层级 3:Python 太低级了:BPFftrace + +在某些用例中,BCC 仍然过于底层,例如在事件响应中检查系统时,时间至关重要,需要快速做出决定,而编写 python/"限制性 C" 会花费太多时间,因此 BPFtrace 建立在 BCC 之上,通过特定领域语言(受 AWK 和 C 启发)提供更高级别的抽象。根据[声明帖](http://www.brendangregg.com/blog/2018-10-08/dtrace-for-linux-2018.html),该语言类似于 DTrace 语言实现,也被称为 DTrace 2.0,并提供了良好的介绍和例子。 + +BPFtrace 在一个强大而安全(但与 BCC 相比仍有局限性)的语言中抽象出如此多的逻辑,是非常让人惊奇的。这个单行 shell 程序统计了每个用户进程系统调用的次数(访问[内置变量](https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md#1-builtins)、[map 函数](https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md#map-functions) 和[count()](https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md#2-count-count)文档获取更多信息)。 + +```bash +bpftrace -e 'tracepoint:raw_syscalls:sys_enter {@[pid, comm] = count();}' +``` + +BPFtrace 在某些方面仍然是一个正在进行的工作。例如,目前还没有简单的方法来定义和运行一个套接字过滤器来实现像我们之前所列举的 sock_example 这样的工具。它可能通过在 BPFtrace 中用 kprobe:netif_receive_skb 钩子完成,但这种情况下 BCC 仍然是一个更好的套接字过滤工具。在任何情况下(即使在目前的状态下),BPFTrace 对于在寻求 BCC 的全部功能之前的快速分析/调试仍然非常有用。 + + + +## 5. 
层级 4:云环境中的 eBPF:IOVisor + +[IOVisor](https://www.iovisor.org/) 是 Linux 基金会的一个[合作项目](https://www.linuxfoundation.org/projects/),基于本系列文章中介绍的 eBPF 虚拟机和工具。它使用了一些非常高层次的热门概念,如 "通用输入/输出",专注于向云/数据中心开发人员和用户提供 eBPF 技术。 + +- 内核 eBPF 虚拟机成为 "IO Visor 运行时引擎" +- 编译器后端成为 "IO Visor 编译器后端" +- 一般的 eBPF 程序被重新命名为 "IO 模块" +- 实现包过滤器的特定 eBPF 程序成为 "IO 数据平面模块/组件" +- 等等。 + +考虑到原来的名字(扩展的伯克利包过滤器),并没有代表什么意义,也许所有这些重命名都是受欢迎和有价值的,特别是如果它能使更多的行业利用 eBPF 的力量。 + +IOVisor 项目创建了 [Hover 框架](https://github.com/iovisor/iomodules),也被称为 "IO 模块管理器",它是一个管理 eBPF 程序(或 IO 模块)的用户空间后台服务程序,能够将 IO 模块推送和拉取到云端,这类似于 Docker daemon 发布/获取镜像的方式。它提供了一个 CLI,Web-REST 接口,也有一个[花哨的 Web UI](https://github.com/iovisor/hoverui)。Hover 的重要部分是用 Go 编写的,因此,除了正常的 BCC 依赖性外,它还依赖于 Go 的安装,这使得它体积变得很大,这并不适合我们最终在第 4 部分中的提及的小型嵌入式设备。 + + + +## 6. 总结 + +在这一部分,我们研究了建立在 eBPF 虚拟机之上的用户空间生态系统,以提高开发人员的工作效率和简化 eBPF 程序部署。这些工具使得使用 eBPF 非常容易,用户只需 "apt-get install bpftrace" 就可以运行单行程序,或者使用 Hover 守护程序将 eBPF 程序(IO 模块)部署到 1000 台机器上。然而,所有这些工具,尽管它们给开发者和用户提供了所有的力量,但却需要很大的磁盘空间,甚至可能无法在 32 位 ARM 系统上运行,这使得它们不是很适合小型嵌入式设备,所以这就是为什么在第 4 部分我们将探索其他项目,试图缓解运行针对嵌入式设备生态系统的 eBPF 程序。 \ No newline at end of file diff --git a/content/zh/blogs/ebpf-tracing-user-processes.md b/content/zh/blogs/ebpf-tracing-user-processes.md new file mode 100644 index 000000000..21dbc5f68 --- /dev/null +++ b/content/zh/blogs/ebpf-tracing-user-processes.md @@ -0,0 +1,268 @@ +--- +title: 'eBPF 概述,第 5 部分:跟踪用户进程' +tag: 'eBPF' +keywords: 'eBPF, Linux, Python' +description: '本文主要讲解了如何使用 eBPF 程序来跟踪用户空间应用程序。' +createTime: '2021-11-23' +author: 'Adrian Ratiu' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/ebpf-guide-cover.png' +--- + +> 原文链接: https://www.collabora.com/news-and-blog/blog/2019/05/14/an-ebpf-overview-part-5-tracing-user-processes/ + +> **作者:Adrian Ratiu**
    +> **译者:狄卫华**
    +> **注:本文已取得作者本人的翻译授权** + +## 1. 前言 + +在之前的部分中,我们专注于 Linux 内核跟踪,在我们看来,基于 eBPF 的项目是最安全、最广泛可用和最有效的方法(eBPF 在 Linux 中完全是上游支持的,保证稳定的 ABI,在几乎所有的发行版中都默认启用,并可与所有其他跟踪机制集成)。 eBPF 成为内核工作的不二之选。 然而,到目前为止,我们故意避免深入讨论用户空间跟踪,因为它值得特别对待,因此我们在第 5 部分中专门讨论。 + +首先,我们将讨论为什么使用,然后我们将 eBPF 用户跟踪分为静态和动态两类分别讨论。 + +## 2. 为什么要在用户空间使用 eBPF? + +最重要的用户问题是,既然有这么多其他的调试器/性能分析器/跟踪器,这么多针对特定语言或操作系统的工具为同样的任务而开发,为什么还要使用 eBPF 来跟踪用户空间进程?答案并不简单,根据不同的使用情况,eBPF 可能不是最佳解决方案;在庞大的用户空间生态系统中,并没有一个适合所有情况的调试/跟踪的项目。 + +eBPF 跟踪具有以下优势: + +- 它为内核和用户空间提供了一个统一的跟踪接口,与其他工具([k,u]probe, (dtrace)tracepoint 等)使用的机制兼容。2015 年的文章[选择 linux 跟踪器](http://www.brendangregg.com/blog/2015-07-08/choosing-a-linux-tracer.html)虽然有些过时,但其提供了很好的见解,说明使用所有不同的工具有多困难,要花多少精力。有一个统一的、强大的、安全的、可广泛使用的框架来满足大多数跟踪的需要,是非常有价值的。一些更高级别的工具,如 Perf/SystemTap/DTrace,正在 eBPF 的基础上重写(成为 eBPF 的前端),所以了解 eBPF 有助于使用它们。 + +- eBPF 是完全可编程的。Perf/ftrace 和其他工具都需要在事后处理数据,而 eBPF 可直接在内核/应用程序中运行自定义的高级本地编译的 C/Python/Go 检测代码。它可以在多个 eBPF 事件运行之间存储数据,例如以基于函数状态/参数计算每个函数调用统计数据。 + +- eBPF 可以跟踪系统中的一切,它并不局限于特定的应用程序。例如可以在共享库上设置 uprobes 并跟踪链接和调用它的所有进程。 + +- 很多调试器需要暂停程序来观察其状态或降低运行时性能,从而难以进行实时分析,尤其是在生产工作负载上。因为 eBPF 附加了 JIT 的本地编译的检测代码,它的性能影响是最小的,不需要长时间暂停执行。 + +诚然,eBPF 也有一些缺点: + +- eBPF 不像其他跟踪器那样可以移植。该虚拟机主要是在 Linux 内核中开发的(有一个正在进行的 BSD 移植),相关的工具是基于 Linux 开发的。 +- eBPF 需要一个相当新的内核。例如对于 MIPS 的支持是在 v4.13 中加入的,但绝大多数 MIPS 设备在运行的内核都比 v4.13 老。 +- 一般来说,eBPF 不容易像语言或特定应用的用户空间调试器那样提供一样多的洞察力。例如,Emacs 的核心是一个用 C 语言编写的 ELISP 解释器:eBPF 可以通过挂载在 Emacs 运行时的 C 函数调用来跟踪/调试 ELISP 程序,但它不了解更高级别的 ELISP 语言实现,因此使用 Emacs 提供的特殊 ELISP 语言特定跟踪器和调试器变得更加有用。另一个例子是调试在 Web 浏览器引擎中运行的 JavaScript 应用程序。 +- 因为 "普通 eBPF" 在 Linux 内核中运行,所以每次 eBPF 检测用户进程时都会发生内核 - 用户上下文切换。这对于调试性能关键的用户空间代码来说可能很昂贵(也许可以使用[用户空间 eBPF 虚拟机](https://github.com/iovisor/ubpf)项目来避免这种切换成本?)。这对于调试性能关键的用户空间代码来说是很昂贵的(也许[用户空间 eBPF VM](https://github.com/iovisor/ubpf) 项目可以用来避免这种切换成本?)。这种上下文切换比正常的调试器(或像 strace 这样的工具)要便宜得多,所以它通常可以忽略不计,但在这种情况下,像 LTTng 这样能够完全运行在用户空间的跟踪器可能更合适。 + +## 3. 
静态跟踪点(USDT 探针) + +静态跟踪点(tracepoint),在用户空间也被称为 USDT(用户静态定义的跟踪)探针(应用程序中感兴趣的特定位置),跟踪器可以在此处挂载检查代码执行和数据。它们由开发人员在源代码中明确定义,通常在编译时用 "--enable-trace" 等标志启用。静态跟踪点的优势在于它们不会经常变化:开发人员通常会保持稳定的静态跟踪 ABI,所以跟踪工具在不同的应用程序版本之间工作,这很有用,例如当升级 PostgreSQL 安装并遇到性能降低时。 + +### 3.1 预定义的跟踪点 +[BCC-tools](https://github.com/iovisor/bcc/tree/master/tools) 包含很多有用的且经过测试的工具,可以与特定应用程序或语言运行时定义的跟踪点进行交互。对于我们的示例,我们将跟踪 Python 应用程序。确保你在构建了 python3 时启用了 "--enable-trace" 标识,并在 python 二进制文件或 libpython(取决于你构建方式)上运行 [tplist](https://github.com/iovisor/bcc/blob/master/tools/tplist.py) 以确认跟踪点被启用: + +```bash +$ tplist -l /usr/lib/libpython3.7m.so +b'/usr/lib/libpython3.7m.so' b'python':b'import__find__load__start' +b'/usr/lib/libpython3.7m.so' b'python':b'import__find__load__done' +b'/usr/lib/libpython3.7m.so' b'python':b'gc__start' +b'/usr/lib/libpython3.7m.so' b'python':b'gc__done' +b'/usr/lib/libpython3.7m.so' b'python':b'line' +b'/usr/lib/libpython3.7m.so' b'python':b'function__return' +b'/usr/lib/libpython3.7m.so' b'python':b'function__entry' +``` + +首先我们使用 BCC 提供的一个很酷的跟踪工具 [uflow](https://github.com/iovisor/bcc/blob/master/tools/lib/uflow_example.txt),来跟踪 python 的[简单 http 服务器](https://github.com/python/cpython/blob/3.7/Lib/http/server.py)的执行流程。跟踪应该是不言自明的,箭头和缩进表示函数的进入/退出。我们在这个跟踪中看到的是一个工作线程如何在 CPU 3 上退出,而主线程则准备在 CPU 0 上为其他传入的 http 请求提供服务。 + +```bash +$ python -m http.server >/dev/null & sudo ./uflow -l python $! +[4] 11727 +Tracing method calls in python process 11727... Ctrl-C to quit. 
+CPU PID TID TIME(us) METHOD +3 11740 11757 7.034 /usr/lib/python3.7/_weakrefset.py._remove +3 11740 11757 7.034 /usr/lib/python3.7/threading.py._acquire_restore +0 11740 11740 7.034 /usr/lib/python3.7/threading.py.__exit__ +0 11740 11740 7.034 /usr/lib/python3.7/socketserver.py.service_actions +0 11740 11740 7.034 /usr/lib/python3.7/selectors.py.select +0 11740 11740 7.532 /usr/lib/python3.7/socketserver.py.service_actions +0 11740 11740 7.532 +``` + +接下来,我们希望在跟踪点被命中时运行我们的自定义代码,因此我们不完全依赖 BCC 提供的任何工具。 以下示例将自身挂钩到 python 的 function__entry 跟踪点(请参阅 [python 检测](https://docs.python.org/3/howto/instrumentation.html)文档)并在有人下载文件时通知我们: + +```python +#!/usr/bin/env python +from bcc import BPF, USDT +import sys + +bpf = """ +#include + +static int strncmp(char *s1, char *s2, int size) {for (int i = 0; i < size; ++i) + if (s1[i] != s2[i]) + return 1; + return 0; +} + +int trace_file_transfers(struct pt_regs *ctx) { + uint64_t fnameptr; + char fname[128]={0}, searchname[9]="copyfile"; + + bpf_usdt_readarg(2, ctx, &fnameptr); + bpf_probe_read(&fname, sizeof(fname), (void *)fnameptr); + + if (!strncmp(fname, searchname, sizeof(searchname))) + bpf_trace_printk("Someone is transferring a file!\\n"); + return 0; +}; +""" + +u = USDT(pid=int(sys.argv[1])) +u.enable_probe(probe="function__entry", fn_name="trace_file_transfers") +b = BPF(text=bpf, usdt_contexts=[u]) +while 1: + try: + (_, _, _, _, ts, msg) = b.trace_fields() + except ValueError: + continue + print("%-18.9f %s" % (ts, msg)) +``` + +我们通过再次附加到简单的 http-server 进行测试: + +```bash +$ python -m http.server >/dev/null & sudo ./trace_simplehttp.py $! +[14] 28682 +34677.450520000 b'Someone is transferring a file!' 
+``` + +上面的例子告诉我们什么时候有人在下载文件,但它不能比这更详细的信息,比如关于谁在下载、下载什么文件等。这是因为 python 只默认启用了几个非常通用的跟踪点(模块加载、函数进入/退出等)。为了获得更多的信息,我们必须在感兴趣的地方定义我们自己的跟踪点,以便我们能够提取相关的数据。 + +### 3.2 定义我们自己的跟踪点 + +到目前为止,我们只使用别人定义的跟踪点,但是如果我们的应用程序并没有提供任何跟踪点,或者我们需要添加比现有跟踪点更多的信息,那么我们将不得不添加自己的跟踪点。 + +添加跟踪点方式有多种,例如 python-core 通过 [pydtrace.h](https://github.com/python/cpython/blob/v3.7.2/Include/pydtrace.h) 和 [pydtrace.d](https://github.com/python/cpython/blob/v3.7.2/Include/pydtrace.d) 使用 systemtap 的开发包 "systemtap-sdt-dev",但我们将采取另一种方法,使用 [libstapsdt](https://github.com/sthima/libstapsdt),因为它有一个更简单的 API,更轻巧(只依赖于 libelf),并支持多种语言绑定。为了保持一致性,我们再次把重点放在 python 上,但是跟踪点也可以用其他语言添加,这里有[一个 C 语言示例](https://github.com/sthima/libstapsdt/blob/master/example/demo.c)。 + +首先,我们给简单的 http 服务器打上补丁,公开跟踪点。代码应该是不言自明的:注意跟踪点的名字 **file_transfer** 及其参数,足够存储两个字符串指针和一个 32 位无符号整数,代表客户端 IP 地址,文件路径和文件大小。 + +```python +diff --git a/usr/lib/python3.7/http/server.py b/usr/lib/python3.7/http/server.py +index ca2dd50..af08e10 100644 +--- a/usr/lib/python3.7/http/server.py ++++ b/usr/lib/python3.7/http/server.py +@@ -107,6 +107,13 @@ from functools import partial + + from http import HTTPStatus + ++import stapsdt ++provider = stapsdt.Provider("simplehttp") ++probe = provider.add_probe("file_transfer", ++ stapsdt.ArgTypes.uint64, ++ stapsdt.ArgTypes.uint64, ++ stapsdt.ArgTypes.uint32)+provider.load() + + # Default error message template + DEFAULT_ERROR_MESSAGE = """\ +@@ -650,6 +657,8 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): + f = self.send_head() + if f: + try: ++ path = self.translate_path(self.path) ++ probe.fire(self.address_string(), path, os.path.getsize(path)) + self.copyfile(f, self.wfile) + finally: + f.close() +``` + +运行打过补丁的服务器,我们可以使用 tplist 验证我们的 **file_transfer** 跟踪点在运行时是否存在: + +```bash +$ python -m http.server >/dev/null 2>&1 & tplist -p $! 
+[1] 13297 +b'/tmp/simplehttp-Q6SJDX.so' b'simplehttp':b'file_transfer' +b'/usr/lib/libpython3.7m.so.1.0' b'python':b'import__find__load__start' +b'/usr/lib/libpython3.7m.so.1.0' b'python':b'import__find__load__done' +``` + +我们将对上述示例中的跟踪器示例代码进行以下最重要的修改: + +- 它将其逻辑挂接到我们自定义的 **file_transfer** 跟踪点。 +- 它使用 [PERF EVENTS](https://perf.wiki.kernel.org/index.php/Tutorial) 来存储可以将任意结构传递到用户空间的数据,而不是我们之前使用的 ftrace 环形缓存区只能传输单个字符串。 +- 它**不**使用 **bpf_usdt_readarg** 来获取 USDT 提供的指针,而是直接在处理程序函数签名中声明它们。 这是一个显着的质量改善,可用于所有处理程序。 +- 此跟踪器明确使用 **[python2](https://perf.wiki.kernel.org/index.php/Tutorial)**,即使到目前为止我们所有的示例(包括上面的 python http.server 补丁) 使用 **[python3](https://perf.wiki.kernel.org/index.php/Tutorial)**。 希望将来所有 BCC API 和文档都能移植到 python 3。 + +```python + #!/usr/bin/env python2 + from bcc import BPF, USDT + import sys + + bpf = """ + #include + + BPF_PERF_OUTPUT(events); + + struct file_transf {char client_ip_str[20]; + char file_path[300]; + u32 file_size; + u64 timestamp; + }; + + int trace_file_transfers(struct pt_regs *ctx, char *ipstrptr, char *pathptr, u32 file_size) {struct file_transf ft = {0}; + + ft.file_size = file_size; + ft.timestamp = bpf_ktime_get_ns(); + bpf_probe_read(&ft.client_ip_str, sizeof(ft.client_ip_str), (void *)ipstrptr); + bpf_probe_read(&ft.file_path, sizeof(ft.file_path), (void *)pathptr); + + events.perf_submit(ctx, &ft, sizeof(ft)); + return 0; + }; + """ + + def print_event(cpu, data, size): + event = b["events"].event(data) + print("{0}: {1} is downloding file {2} ({3} bytes)".format(event.timestamp, event.client_ip_str, event.file_path, event.file_size)) + + u = USDT(pid=int(sys.argv[1])) + u.enable_probe(probe="file_transfer", fn_name="trace_file_transfers") + b = BPF(text=bpf, usdt_contexts=[u]) + b["events"].open_perf_buffer(print_event) + + while 1: + try: + b.perf_buffer_poll() + except KeyboardInterrupt: + exit() +``` + +跟踪已打过补丁的服务器: + +```bash +$ python -m http.server >/dev/null 2>&1 & sudo ./trace_stapsdt.py $! 
+[1] 5613 +325540469950102: 127.0.0.1 is downloading file /home/adi/ (4096 bytes) +325543319100447: 127.0.0.1 is downloading file /home/adi/.bashrc (827 bytes) +325552448306918: 127.0.0.1 is downloading file /home/adi/workspace/ (4096 bytes) +325563646387008: 127.0.0.1 is downloading file /home/adi/workspace/work.tar (112640 bytes) +(...) +``` + +上面自定义的 **file_transfer** 跟踪点看起来很简单(直接 python 打印或日志记录调用可能有相同的效果),但它提供的机制非常强大:良好放置的跟踪点保证 ABI 稳定性,提供动态运行的能力安全、本地快速、**可编程**逻辑可以非常有助于快速分析和修复各种问题,而无需重新启动有问题的应用程序(重现问题可能需要很长时间)。 + +## 4. 动态探针(uprobes) + +上面举例说明的静态跟踪点的问题在于,它们需要在源代码中明确定义,并且在修改跟踪点时需要重新构建应用程序。保证现有跟踪点的 ABI 稳定性对维护人员如何重新构建/重写跟踪点数据的代码施加了限制。因此,在某些情况下,完全运行时动态用户空间探测器(uprobes)是首选:它们以特别的方式直接在运行应用程序的内存中进行探测,而无需任何特殊的源代码定义。动态探测器可能会比较容易在应用程序版本之间失效,但即便如此,它们对于实时调试正在运行的实例也很有用。 + +虽然静态跟踪点对于跟踪用 Python 或 Java 等高级语言编写的应用程序很有用,但 uprobes 对此不太有用,因为它们工作比较底层,并且不了解语言运行时实现(静态跟踪点之所以可以工作,因为开发人员自行承担公开高级应用程序的相关数据)。然而,动态探测器对于调试语言实现/引擎本身或用没有运行时的语言(如 C)编写的应用程序很有用。 + +可以将 uprobe 添加到优化过(stripped)的二进制文件中,但用户必须手动计算进程内内存偏移位置,uprobe 应通过 objdump 和 /proc//maps 等工具附加到该位置([参见示例](https://github.com/torvalds/linux/blob/v4.20/Documentation/trace/uprobetracer.rst)),但这种方式比较痛苦且不可移植。 由于大多数发行版都提供调试符号包(或使用调试符号构建的快速方法)并且 BCC 使得使用带有符号名称解析的 uprobes 变得简单,因此绝大多数动态探测使用都是以这种方式进行的。 + +[gethostlatency](https://github.com/iovisor/bcc/blob/master/tools/gethostlatency.py) BCC 工具通过将 uprobes 附加到 gethostbyname 和 libc 中的相关函数来打印 DNS 请求延迟。 要验证 libc 未优化(stripped)以便可以运行该工具(否则会引发 sybol-not-found 错误): + +```bash +$ file /usr/lib/libc-2.28.so +/usr/lib/libc-2.28.so: ELF 64-bit LSB shared object, x86-64, version 1 (GNU/Linux), dynamically linked, (...), not stripped +$ nm -na /usr/lib/libc-2.28.so | grep -i -e getaddrinfo +0000000000000000 a getaddrinfo.c +``` + +[gethostlatency](https://github.com/iovisor/bcc/blob/master/tools/gethostlatency.py) 代码与我们上面检查的跟踪点示例非常相似(并且在某些地方相同,它还使用 BPF_PERF_OUTPUT) ,所以我们不会在这里完整地发布它。 最相关的区别是使用 [BCC uprobe API](https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md#4-attach_uprobe): + +```python 
+b.attach_uprobe(name="c", sym="getaddrinfo", fn_name="do_entry", pid=args.pid) +b.attach_uretprobe(name="c", sym="getaddrinfo", fn_name="do_return", pid=args.pid) +``` + +这里需要理解和记住的关键思想是:只要对我们的 BCC eBPF 程序做一些小的改动,我们就可以通过静态和动态探测来跟踪非常不同的应用程序、库甚至是内核。之前我们是静态跟踪 Python 应用程序,现在我们是动态地测量 libc 的主机名解析延时。通过对这些小的(小于 150LOC,很多是模板)例子进行类似的修改,可在运行的系统中跟踪任何内容,这非常安全,没有崩溃的风险或其他工具引起的问题(如调试器应用程序暂停/停顿)。 + +## 5. 总结 + +在第 5 部分中,我们研究了如何使用 eBPF 程序来跟踪用户空间应用程序。 使用 eBPF 完成这项任务的最大优势是它提供了一个统一的接口来安全有效地跟踪整个系统:可以在应用程序中重现错误,然后进一步跟踪到库或内核中,通过统一的编程框架/接口提供完整的系统可见性。 然而,eBPF 并不是银弹,尤其是在调试用高级语言编写的应用程序时,特定语言的工具可以更好地提供洞察力,或者对于那些运行旧版本 Linux 内核或需要非 Linux 系统的应用程序。 diff --git a/content/zh/blogs/ebpf-working-with-embedded-systems.md b/content/zh/blogs/ebpf-working-with-embedded-systems.md new file mode 100644 index 000000000..df1d2aa54 --- /dev/null +++ b/content/zh/blogs/ebpf-working-with-embedded-systems.md @@ -0,0 +1,239 @@ +--- +title: 'eBPF 概述,第 4 部分:在嵌入式系统运行' +tag: 'eBPF' +keywords: 'eBPF' +description: '在这一部分中,我们将从另外一个视角来分析项目,尝试解决嵌入式 Linux 系统所面临的一些独特的问题:如需要非常小的自定义操作系统镜像,不能容纳完整的 BCC LLVM 工具链/python 安装,或试图避免同时维护主机的交叉编译(本地)工具链和交叉编译的目标编译器工具链,以及其相关的构建逻辑,即使在使用像 OpenEmbedded/Yocto 这样的高级构建系统时也很重要。' +createTime: '2021-11-10' +author: 'Adrian Ratiu' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/ebpf-guide-cover.png' +--- + +> 原文链接: https://www.collabora.com/news-and-blog/blog/2019/05/06/an-ebpf-overview-part-4-working-with-embedded-systems/ + +> **作者:Adrian Ratiu**
    +> **译者:狄卫华**
    +> **注:本文已取得作者本人的翻译授权** + +## 1. 前言 + +在本系列的[第 1 部分](https://kubesphere.com.cn/blogs/ebpf-guide/)和[第 2 部分](https://kubesphere.com.cn/blogs/ebpf-machine-bytecode/),我们介绍了 eBPF 虚拟机内部工作原理,在[第 3 部分](https://kubesphere.com.cn/blogs/ebpf-software-stack/)我们研究了基于底层虚拟机机制之上开发和使用 eBPF 程序的主流方式。 + +在这一部分中,我们将从另外一个视角来分析项目,尝试解决嵌入式 Linux 系统所面临的一些独特的问题:如需要非常小的自定义操作系统镜像,不能容纳完整的 BCC LLVM 工具链/python 安装,或试图避免同时维护主机的交叉编译(本地)工具链和交叉编译的目标编译器工具链,以及其相关的构建逻辑,即使在使用像 OpenEmbedded/Yocto 这样的高级构建系统时也很重要。 + +## 2. 关于可移植性 + +在第 3 部分研究的运行 eBPF/BCC 程序的主流方式中,可移植性并不是像在嵌入式设备上面临的问题那么大:eBPF 程序是在被加载的同一台机器上编译的,使用已经运行的内核,而且头文件很容易通过发行包管理器获得。嵌入式系统通常运行不同的 Linux 发行版和不同的处理器架构,与开发人员的计算机相比,有时具有重度修改或上游分歧的内核,在构建配置上也有很大的差异,或还可能使用了只有二进制的模块。 + +eBPF 虚拟机的字节码是通用的(并未与特定机器相关),所以一旦编译好 eBPF 字节码,将其从 x86_64 移动到 ARM 设备上并不会引起太多问题。当字节码探测内核函数和数据结构时,问题就开始了,这些函数和数据结构可能与目标设备的内核不同或者会不存在,所以至少目标设备的内核头文件必须存在于构建 eBPF 程序字节码的主机上。新的功能或 eBPF 指令也可能被添加到以后的内核中,这可以使 eBPF 字节码向前兼容,但不能在内核版本之间向后兼容(参见[内核版本与 eBPF 功能](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md))。建议将 eBPF 程序附加到稳定的内核 ABI 上,如跟踪点(tracepoint),这可以缓解常见的可移植性。 + +最近一个重要的工作已经开始,通过在 LLVM 生成的 eBPF 对象代码中嵌入数据类型信息,通过增加 BTF(BTF 类型格式)数据,以增加 eBPF 程序的可移植性(CO-RE 一次编译,到处运行)。更多信息见这里的[补丁](https://lwn.net/Articles/750695/)和[文章](https://lwn.net/Articles/773198/)。这很重要,因为 BTF 涉及到 eBPF 软件技术栈的所有部分(内核虚拟机和验证器、clang/LLVM 编译器、BCC 等),但这种方式可带来很大的便利,允许重复使用现有的 BCC 工具,而不需要特别的 eBPF 交叉编译和在嵌入式设备上安装 LLVM 或运行 BPFd。截至目前,CO-RE BTF 工作仍处于早期开发阶段,还需要付出相当多的工作才能可用【译者注:当前在高版本内核已经可以使用或者编译内核时启用了 BTF 编译选项】。也许我们会在其完全可用后再发表一篇博文。 + +## 3. 
BPFd
+
+[BPFd](https://lwn.net/Articles/744522/)(项目地址 )更像是一个为 Android 设备开发的概念验证,后被放弃,转而通过 [adeb](https://github.com/joelagnel/adeb) 包运行一个完整的设备上的 BCC 工具链【译者注:BCC 在 adeb 的编译文档参见[这里](https://github.com/joelagnel/adeb/blob/master/BCC.md)】。如果一个设备足够强大,可以运行 Android 和 Java,那么它也可能可以安装 BCC/LLVM/python。尽管这个实现有些不完整(通信是通过 Android USB 调试桥或作为一个本地进程完成的,而不是通过一个通用的传输层),但这个设计很有趣,有足够时间和资源的人可以把它拿起来合并,继续搁置的 [PR 工作](https://github.com/iovisor/bcc/pull/1675)。
+
+简而言之,BPFd 是一个运行在嵌入式设备上的守护程序,作为本地内核/libbpf 的一个远程过程调用(RPC)接口。Python 在主机上运行,调用 BCC 来编译/部署 eBPF 字节码,并通过 BPFd 创建/读取 map。BPFd 的主要优点是,所有的 BCC 基础设施和脚本都可以工作,而不需要在目标设备上安装 BCC、LLVM 或 python,BPFd 二进制文件只有 100kb 左右的大小,并依赖 libc。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part4-Diagram1.jpeg)
+
+## 4. Ply
+
+[ply](https://wkz.github.io/ply/) 项目实现了一种与 BPFtrace 非常相似的高级领域特定语言(受到 AWK 和 C 的启发),其明确的目的是将运行时的依赖性降到最低。它只依赖于一个现代的 libc(不一定是 GNU 的 libc)和 shell(与 sh 兼容)。Ply 本身实现了一个 eBPF 编译器,需要根据目标设备的内核头文件进行构建,然后作为一个单一的二进制库和 shell 包装器部署到目标设备上。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part4-Diagram2.jpeg)
+
+为了更好地解释 ply,我们把第 3 部分中的 BPFtrace 例子和 ply 的实现进行对比:
+
+- BPFtrace:要运行该例子,你需要数百 MB 的 LLVM/clang、libelf 和其他依赖项:
+
+  `bpftrace -e 'tracepoint:raw_syscalls:sys_enter {@[pid, comm] = count();}'`
+
+- ply:你只需要一个 ~50kb 的二进制文件,它产生的结果是相同的,语法几乎相同:
+
+  `ply 'tracepoint:raw_syscalls/sys_enter {@[pid, comm] = count();}'`
+
+Ply 仍在大量开发中(最近的 v2.0 版本是完全重写的)【译者注:当前最新版本为 2.1.1,最近一次代码提交是 8 个月前,活跃度一般】,除了一些示例之外,该语言还不稳定或缺乏文档,它不如完整的 BCC 强大,也没有 BPFtrace 丰富的功能特性,但它对于通过 ssh 或串行控制台快速调试远程嵌入式设备仍然非常有用。
+
+
+
+## 5. 
Gobpf + +[Gobpf](https://github.com/iovisor/gobpf) 及其合并的子项目(goebpf, gobpf-elf-loader),是 IOVisor 项目的一部分,为 BCC 提供 Golang 语言绑定。eBPF 的内核逻辑仍然用 "限制性 C" 编写,并由 LLVM 编译,只有标准的 python/lua 用户空间脚本被 Go 取代。这个项目对嵌入式设备的意义在于它的 eBPF [elf 加载模块](https://github.com/iovisor/gobpf/tree/master/elf),其可以被交叉编译并在嵌入式设备上独立运行,以加载 eBPF 程序至内核并与与之交互。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/eBPF-Part4-Diagram3.jpeg) + +值得注意的是,go 加载器可以被写成通用的(我们很快就会看到),因此它可以加载和运行任何 eBPF 字节码,并在本地重新用于多个不同的跟踪会话。 + +使用 gobpf 很痛苦的,主要是因为缺乏文档。目前最好的 "文档" 是 [tcptracer 源码](https://github.com/weaveworks/tcptracer-bpf),它相当复杂(他们使用 kprobes 而不依赖于特定的内核版本!),但从它可以学到很多。Gobpf 本身也是一项正在进行的工作:虽然 elf 加载器相当完整,并支持加载带有套接字、(k|u)probes、tracepoints、perf 事件等加载的 eBPF ELF 对象,但 bcc go 绑定模块还不容易支持所有这些功能。例如,尽管你可以写一个 socket_ilter ebpf 程序,将其编译并加载到内核中,但你仍然不能像 BCC 的 python 那样从 go 用户空间轻松地与 eBPF 进行交互,BCC 的 API 更加成熟和用户友好。无论如何,gobpf 仍然比其他具有类似目标的项目处于更好的状态。 + +让我们研究一个简单的例子来说明 gobpf 如何工作的。首先,我们将在本地 x86_64 机器上运行它,然后交叉编译并在 32 位 ARMv7 板上运行它,比如流行的 Beaglebone 或 Raspberry Pi。我们的文件目录结构如下: + +```bash +$ find . -type f +./src/open-example.go +./src/open-example.c +./Makefile +``` + +**open-example.go**:这是建立在 gobpf/elf 之上的 eBPF ELF 加载器。它把编译好的 "限制性 C" ELF 对象作为参数,加载到内核并运行,直到加载器进程被杀死,这时内核会自动卸载 eBPF 逻辑【译者注:通常情况是这样的,也有场景加载器退出,ebpf 程序继续运行的】。我们有意保持加载器的简单性和通用性(它加载在对象文件中发现的任何探针),因此加载器可以被重复使用。更复杂的逻辑可以通过使用 [gobpf 绑定](https://github.com/iovisor/gobpf/blob/master/bcc/module.go) 模块添加到这里。 + +```go +package main + +import ( + "fmt" + "os" + "os/signal" + "github.com/iovisor/gobpf/elf" +) + +func main() {mod := elf.NewModule(os.Args[1]) + + err := mod.Load(nil); + if err != nil {fmt.Fprintf(os.Stderr, "Error loading'%s'ebpf object: %v\n", os.Args[1], err)os.Exit(1) + } + + err = mod.EnableKprobes(0) + if err != nil {fmt.Fprintf(os.Stderr, "Error loading kprobes: %v\n", err) + os.Exit(1) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, os.Kill) + // ... 
+} +``` + +**open-example.c**:这是上述加载器加载至内核的 "限制性 C" 源代码。它挂载在 do_sys_open 函数,并根据 [ftrace format](https://raw.githubusercontent.com/torvalds/linux/v4.20/Documentation/trace/ftrace.rst) 将进程命令、PID、CPU、打开文件名和时间戳打印到跟踪环形缓冲区,(详见 "输出格式" 一节)。打开的文件名作为 [do_sys_open call](https://github.com/torvalds/linux/blob/v4.20/fs/open.c#L1048) 的第二个参数传递,可以从代表函数入口的 CPU 寄存器的上下文结构中访问。 + +```c +#include +#include +#include + +SEC("kprobe/do_sys_open") +int kprobe__do_sys_open(struct pt_regs *ctx) +{char file_name[256]; + + bpf_probe_read(file_name, sizeof(file_name), PT_REGS_PARM2(ctx)); + + char fmt[] = "file %s\n"; + bpf_trace_printk(fmt, sizeof(fmt), &file_name); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +__u32 _version SEC("version") = 0xFFFFFFFE; +``` + +在上面的代码中,我们定义了特定的 "SEC" 区域,这样 gobpf 加载器就可获取到哪里查找或加载内容的信息。在我们的例子中,区域为 kprobe、license 和 version。特殊的 0xFFFFFFFE 值告诉加载器,这个 eBPF 程序与任何内核版本都是兼容的,因为打开系统调用而破坏用户空间的机会接近于 0。 + +**Makefile**:这是上述两个文件的构建逻辑。注意我们是如何在 include 路径中加入 "arch/x86/..." 的;在 ARM 上它将是 "arch/arm/..."。 + +```makefile +SHELL=/bin/bash -o pipefail +LINUX_SRC_ROOT="/home/adi/workspace/linux" +FILENAME="open-example" + +ebpf-build: clean go-build + clang \ + -D__KERNEL__ -fno-stack-protector -Wno-int-conversion \ + -O2 -emit-llvm -c "src/${FILENAME}.c" \ + -I ${LINUX_SRC_ROOT}/include \ + -I ${LINUX_SRC_ROOT}/tools/testing/selftests \ + -I ${LINUX_SRC_ROOT}/arch/x86/include \ + -o - | llc -march=bpf -filetype=obj -o "${FILENAME}.o" + +go-build: + go build -o ${FILENAME} src/${FILENAME}.go + +clean: + rm -f ${FILENAME}* +``` + +运行上述 makefile 在当前目录下产生两个新文件: + +* open-example:这是编译后的 src/*.go 加载器。它只依赖于 libc 并且可以被复用来加载多个 eBPF ELF 文件运行多个跟踪。 + +* open-example.o:这是编译后的 eBPF 字节码,将在内核中加载。 + +“open-example" 和 "open-example.o" ELF 二进制文件可以进一步合并成一个;加载器可以包括 eBPF 二进制文件作为资产,也可以像 [tcptracer](https://github.com/weaveworks/tcptracer-bpf/blob/master/pkg/tracer/tcptracer-ebpf.go#L80) 那样在其源代码中直接存储为字节数。然而,这超出了本文的范围。 + +运行例子显示以下输出(见 [ftrace 
文档]((https://raw.githubusercontent.com/torvalds/linux/v4.20/Documentation/trace/ftrace.rst)) 中的 "输出格式" 部分)。 + +```bash +# (./open-example open-example.o &) && cat /sys/kernel/debug/tracing/trace_pipe +electron-17494 [007] ...3 163158.937350: 0: file /proc/self/maps +systemd-1 [005] ...3 163160.120796: 0: file /proc/29261/cgroup +emacs-596 [006] ...3 163163.501746: 0: file /home/adi/ +(...) +``` + +沿用我们在本系列的第 3 部分中定义的术语,我们的 eBPF 程序有以下部分组成: + - **后端**:是 open-example.o ELF 对象。它将数据写入内核跟踪环形缓冲区。 + + - **加载器**:这是编译过的 open-example 二进制文件,包含 gobpf/elf 加载器模块。只要它运行,数据就会被添加到跟踪缓冲区中。 + + - **前端**:这就是 `cat /sys/kernel/debug/tracing/trace_pipe`。非常 UNIX 风格。 + + - **数据结构**:内核跟踪环形缓冲区。 + +现在将我们的例子交叉编译为 32 位 ARMv7。 基于你的 ARM 设备运行的内核版本: +- 内核版本>=5.2:只需改变 makefile,就可以交叉编译与上述相同的源代码。 +- 内核版本<5.2:除了使用新的 makefile 外,还需要将 PT_REGS_PARM* 宏从 [这个 patch](https://lore.kernel.org/bpf/20190304205019.15071-1-adrian.ratiu@collabora.com/) 复制到 "受限制 C" 代码。 + +新的 makefile 告诉 LLVM/Clang,eBPF 字节码以 ARMv7 设备为目标,使用 32 位 eBPF 虚拟机子寄存器地址模式,以便虚拟机可以正确访问本地处理器提供的 32 位寻址内存(还记得第 2 部分中介绍的所有 eBPF 虚拟机寄存器默认为 64 位宽),设置适当的包含路径,然后指示 Go 编译器使用正确的交叉编译设置。在运行这个 makefile 之前,需要一个预先存在的交叉编译器工具链,它被指向 CC 变量。 + +```makefile +SHELL=/bin/bash -o pipefail +LINUX_SRC_ROOT="/home/adi/workspace/linux" +FILENAME="open-example" + +ebpf-build: clean go-build + clang \ + --target=armv7a-linux-gnueabihf \ + -D__KERNEL__ -fno-stack-protector -Wno-int-conversion \ + -O2 -emit-llvm -c "src/${FILENAME}.c" \ + -I ${LINUX_SRC_ROOT}/include \ + -I ${LINUX_SRC_ROOT}/tools/testing/selftests \ + -I ${LINUX_SRC_ROOT}/arch/arm/include \ + -o - | llc -march=bpf -filetype=obj -o "${FILENAME}.o" + +go-build: + GOOS=linux GOARCH=arm CGO_ENABLED=1 CC=arm-linux-gnueabihf-gcc \ + go build -o ${FILENAME} src/${FILENAME}.go + +clean: + rm -f ${FILENAME}* +``` + +运行新的 makefile,并验证产生的二进制文件已经被正确地交叉编译: + +```bash +[adi@iwork]$ file open-example* +open-example: ELF 32-bit LSB executable, ARM, EABI5 version 1 (SYSV), dynamically linked, interpreter (...), stripped 
+open-example.o: ELF 64-bit LSB relocatable, *unknown arch 0xf7* version 1 (SYSV), not stripped +``` + +然后将加载器和字节码复制到设备上,与在 x86_64 主机上使用上述相同的命令来运行。记住,只要修改和重新编译 C eBPF 代码,加载器就可以重复使用,用于运行不同的跟踪。 + +```bash +[root@ionelpi adi]# (./open-example open-example.o &) && cat /sys/kernel/debug/tracing/trace_pipe +ls-380 [001] d..2 203.410986: 0: file /etc/ld-musl-armhf.path +ls-380 [001] d..2 203.411064: 0: file /usr/lib/libcap.so.2 +ls-380 [001] d..2 203.411922: 0: file / +zcat-397 [002] d..2 432.676010: 0: file /etc/ld-musl-armhf.path +zcat-397 [002] d..2 432.676237: 0: file /usr/lib/libtinfo.so.5 +zcat-397 [002] d..2 432.679431: 0: file /usr/bin/zcat +gzip-397 [002] d..2 432.693428: 0: file /proc/ +gzip-397 [002] d..2 432.693633: 0: file config.gz +``` + +由于加载器和字节码加起来只有 2M 大小,这是一个在嵌入式设备上运行 eBPF 的相当好的方法,而不需要完全安装 BCC/LLVM。 + + + +## 6. 总结 + +在本系列的第 4 部分,我们研究了可以用于在小型嵌入式设备上运行 eBPF 程序的相关项目。不幸的是,当前使用这些项目还是比较很困难的:它们有的被遗弃或缺乏人力,在早期开发时一切都在变化,或缺乏基本的文档,需要用户深入到源代码中并自己想办法解决。正如我们所看到的,gobpf 项目作为 BCC/python 的替代品是最有活力的,而 ply 也是一个有前途的 BPFtrace 替代品,其占用空间最小。随着更多的工作投入到这些项目中以降低使用者的门槛,eBPF 的强大功能可以用于资源受限的嵌入式设备,而无需移植/安装整个 BCC/LLVM/python/Hover 技术栈。 diff --git a/content/zh/blogs/edge-node-monitoring.md b/content/zh/blogs/edge-node-monitoring.md index 544c3eb4c..5eaf66d58 100644 --- a/content/zh/blogs/edge-node-monitoring.md +++ b/content/zh/blogs/edge-node-monitoring.md @@ -1,6 +1,6 @@ --- title: '从 KubeSphere 3.1.0 边缘节点的监控问题排查,简要解析边缘监控原理' -tag: 'KubeSphere, monitoring' +tag: 'KubeSphere, 边缘节点' keywords: 'Kubernetes, KubeSphere, monitoring, 边缘节点' description: 'KubeSphere 3.1.0 通过集成 KubeEdge,将节点和资源的管理延伸到了边缘,也是 KubeSphere 正式支持边缘计算的第一个版本。本文作者也第一时间搭建和试用了边缘节点相关的功能,但是在边缘节点纳管之后遇到了一些监控的小问题,在排查过程中也顺带了解了一下 KubeSphere 对于边缘节点的监控原理,发出来和大家分享,方便其他的开发者能够更快的排查问题或进行二次开发。' createTime: '2021-06-10' diff --git a/content/zh/blogs/faas-openfunction.md b/content/zh/blogs/faas-openfunction.md new file mode 100644 index 000000000..fc48a4836 --- /dev/null +++ b/content/zh/blogs/faas-openfunction.md @@ -0,0 +1,294 @@ +--- 
+title: 'OpenFunction:从 0 到 1,打造新一代开源函数计算平台' +tag: 'Serverless, FaaS, OpenFunction' +keywords: 'Serverless, FaaS, OpenFunction' +description: '本文介绍函数计算概念、参考架构及发展趋势,介绍云原生 Serverless 领域的最新进展,进而阐述如何利用云原生技术从 0 到 1 打造新一代开源函数计算平台 OpenFunction。' +createTime: '2021-12-03' +author: '霍秉杰' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/serverless-faas-openfunction.png' +--- + +**无服务器计算**,即通常所说的 Serverless,已经成为当前云原生领域炙手可热的名词,是继 IaaS,PaaS 之后云计算发展的下一波浪潮。Serverless 强调的是一种架构思想和服务模型,让开发者无需关心基础设施(服务器等),而是专注到应用程序业务逻辑上。加州大学伯克利分校在论文 A Berkeley View on Serverless Computing 中给出了两个关于 Serverless 的核心观点: + ++ 有服务的计算并不会消失,但随着 Serverless 的成熟,有服务计算的重要性会逐渐降低。 ++ Serverless 最终会成为云时代的计算范式,它能够在很大程度上替代有服务的计算模式,并给 Client-Server 时代划上句号。 + +那么什么是 Serverless 呢? + +## Serverless 介绍 + +关于什么是 Serverless,加州大学伯克利分校在之前提到的论文中也给出了明确定义:`Serverless computing = FaaS + BaaS`。云服务按抽象程度从底层到上层传统的分类是硬件、云平台基本组件、PaaS、应用,但 PaaS 层的理想状态是具备 Serverless 的能力,因此这里我们将 PaaS 层替换成了 Serverless,即下图中的黄色部分。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112011620107.png) + +Serverless 包含两个组成部分 **BaaS** 和 **FaaS**,其中对象存储、关系型数据库以及 MQ 等云上基础支撑服务属于 BaaS(后端即服务),这些都是每个云都必备的基础服务,FaaS(函数即服务)才是 Serverless 的核心。 + +## 现有开源 Serverless 平台分析 + +KubeSphere 社区从 2020 年下半年开始对 Serverless 领域进行深度调研。经过一段时间的调研后,我们发现: + +- 现有开源 FaaS 项目绝大多数启动较早,大部分都在 Knative 出现前就已经存在了; +- Knative 是一个非常杰出的 Serverless 平台,但是 Knative Serving 仅仅能运行应用,不能运行函数,还不能称之为 FaaS 平台; +- Knative Eventing 也是非常优秀的事件管理框架,但是设计有些过于复杂,用户用起来有一定门槛; +- OpenFaaS 是比较流行的 FaaS 项目,但是技术栈有点老旧,依赖于 Prometheus 和 Alertmanager 进行 Autoscaling,在云原生领域并非最专业和敏捷的做法; +- 近年来云原生 Serverless 相关领域陆续涌现出了很多优秀的开源项目如 [KEDA](https://keda.sh/)、 [Dapr](https://dapr.io/)、 [Cloud Native Buildpacks(CNB)](https://buildpacks.io/)、 [Tekton](https://tekton.dev/)、 [Shipwright](https://shipwright.io/) 等,为创建新一代开源 FaaS 平台打下了基础。 + +综上所述,我们调研的结论就是:**现有开源 Serverless 或 FaaS 平台并不能满足构建现代云原生 FaaS 平台的要求,而云原生 Serverless 领域的最新进展却为构建新一代 FaaS 平台提供了可能。** + +## 新一代 FaaS 平台框架设计 + +如果我们要重新设计一个更加现代的 FaaS 平台,它的架构应该是什么样子呢?理想中的 
FaaS 框架应该按照函数生命周期分成几个重要的部分:函数框架 (Functions framework)、函数构建 (Build)、函数服务 (Serving) 和事件驱动框架 (Events Framework)。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112011821074.png) + +作为 FaaS,首先得有一个 Function Spec 来定义函数该怎么写,有了函数之后,还要转换成应用,这个转换的过程就是靠**函数框架**来完成;如果应用想在云原生环境中运行,就得构建容器镜像,构建流程依赖**函数构建**来完成;构建完镜像后,应用就可以部署到**函数服务**的运行时中;部署到运行时之后,这个函数就可以被外界访问了。 + +下面我们将重点阐述函数框架、函数构建和函数服务这几个部分的架构设计。 + +### 函数框架 (Functions framework) + +为了降低开发过程中学习函数规范的成本,我们需要增加一种机制来实现从函数代码到可运行的应用之间的转换。这个机制需要制作一个通用的 main 函数来实现,这个函数用于处理通过 serving url 函数进来的请求。主函数中具体包含了很多步骤,其中一个步骤用于关联用户提交的代码,其余的用于做一些普通的工作(如处理上下文、处理事件源、处理异常、处理端口等等)。 + +在函数构建的过程中,构建器会使用主函数模板渲染用户代码,在此基础上生成应用容器镜像中的 main 函数。我们直接来看个例子,假设有这样一个函数。 + +```go +package hello + +import ( + "fmt" + "net/http" +) + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "Hello, World!\n") +} +``` + +经函数框架转换后会生成如下的应用代码: + +```go +package main + +import ( + "context" + "errors" + "fmt" + "github.com/OpenFunction/functions-framework-go/functionframeworks" + ofctx "github.com/OpenFunction/functions-framework-go/openfunction-context" + cloudevents "github.com/cloudevents/sdk-go/v2" + "log" + "main.go/userfunction" + "net/http" +) + +func register(fn interface{}) error { + ctx := context.Background() + if fnHTTP, ok := fn.(func(http.ResponseWriter, *http.Request)); ok { + if err := functionframeworks.RegisterHTTPFunction(ctx, fnHTTP); err != nil { + return fmt.Errorf("Function failed to register: %v\n", err) + } + } else if fnCloudEvent, ok := fn.(func(context.Context, cloudevents.Event) error); ok { + if err := functionframeworks.RegisterCloudEventFunction(ctx, fnCloudEvent); err != nil { + return fmt.Errorf("Function failed to register: %v\n", err) + } + } else if fnOpenFunction, ok := fn.(func(*ofctx.OpenFunctionContext, []byte) ofctx.RetValue); ok { + if err := functionframeworks.RegisterOpenFunction(ctx, fnOpenFunction); err != nil { + return fmt.Errorf("Function failed to register: %v\n", err) + } + } else { + 
err := errors.New("unrecognized function") + return fmt.Errorf("Function failed to register: %v\n", err) + } + return nil +} + +func main() { + if err := register(userfunction.HelloWorld); err != nil { + log.Fatalf("Failed to register: %v\n", err) + } + + if err := functionframeworks.Start(); err != nil { + log.Fatalf("Failed to start: %v\n", err) + } +} +``` + +其中高亮的部分就是前面用户自己写的函数。在启动应用之前,先对该函数进行注册,可以注册 HTTP 类的函数,也可以注册 cloudevents 和 OpenFunction 函数。注册完成后,就会调用 `functionframeworks.Start` 启动应用。 + +### 函数构建 (Build) + +有了应用之后,我们还要把应用构建成容器镜像。目前 Kubernetes 已经废弃了 dockershim,不再把 Docker 作为默认的容器运行时,这样就无法在 Kubernetes 集群中以 Docker in Docker 的方式构建容器镜像。还有没有其他方式来构建镜像?如何管理构建流水线? + +Tekton 是一个优秀的流水线工具,原来是 Knative 的一个子项目,后来捐给了 [CD 基金会 (Continuous Delivery Foundation)](https://cd.foundation/)。Tekton 的流水线逻辑其实很简单,可以分为三个步骤:获取代码,构建镜像,推送镜像。每一个步骤在 Tekton 中都是一个 Task,所有的 Task 串联成一个流水线。 + +作容器镜像有多种选择,比如 Kaniko、Buildah、BuildKit 以及 Cloud Native Buildpacks(CNB)。其中前三者均依赖 Dockerfile 去制作容器镜像,而 Cloud Native Buildpacks(CNB)是云原生领域最新涌现出来的新技术,它是由 Pivotal 和 Heroku 发起的,不依赖于 Dockerfile,而是能自动检测要 build 的代码,并生成符合 OCI 标准的容器镜像。这是一个非常惊艳的技术,目前已经被 Google Cloud、IBM Cloud、Heroku、Pivotal 等公司采用,比如 Google Cloud 上面的很多镜像都是通过 Cloud Native Buildpacks(CNB)构建出来的。 + +面对这么多可供选择的镜像构建工具,如何在函数构建的过程中让用户自由选择和切换镜像构建的工具?这就需要用到另外一个项目 [Shipwright](https://github.com/shipwright-io/build),这是由 Red Hat 和 IBM 开源的项目,专门用来在 Kubernetes 集群中构建容器镜像,目前也捐给了 CD 基金会。使用 Shipwright,你就可以在上述四种镜像构建工具之间进行灵活切换,因为它提供了一个统一的 API 接口,将不同的构建方法都封装在这个 API 接口中。 + +我们可以通过一个示例来理解 Shipwright 的工作原理。首先需要一个自定义资源 `Build` 的配置清单: + +```yaml +apiVersion: shipwright.io/v1alpha1 +kind: Build +metadata: + name: buildpack-nodejs-build +spec: + source: + url: https://github.com/shipwright-io/sample-nodejs + contextDir: source-build + strategy: + name: buildpacks-v3 + kind: ClusterBuildStrategy + output: + image: docker.io/${REGISTRY_ORG}/sample-nodejs:latest + credentials: + name: push-secret +``` + +这个配置清单分为 3 个部分: + ++ source 表示去哪获取源代码; ++ output 表示源代码构建的镜像要推送到哪个镜像仓库; ++ strategy 
指定了构建镜像的工具。 + +其中 strategy 是由自定义资源 `ClusterBuildStrategy` 来配置的,比如使用 buildpacks 来构建镜像,ClusterBuildStrategy 的内容如下: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112012139381.png) + +这里分为两个步骤,一个是准备环境,一个是构建并推送镜像。每一步都是 Tekton 的一个 Task,由 Tekton 流水线来管理。 + +可以看到,Shipwright 的意义在于将镜像构建的能力进行了抽象,用户可以使用统一的 API 来构建镜像,通过编写不同的 strategy 就可以切换不同的镜像构建工具。 + +### 函数服务 (Serving) + +函数服务 (Serving) 指的是如何运行函数/应用,以及赋予函数/应用基于事件驱动或流量驱动的自动伸缩的能力 (Autoscaling)。CNCF Serverless 白皮书定义了函数服务的四种调用类型: + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112012154264.png) + +我们可以对其进行精简一下,主要分为两种类型: + ++ **同步函数**:客户端必须发起一个 HTTP 请求,然后必须等到函数执行完成并获取函数运行结果后才返回。 ++ **异步函数**:发起请求之后直接返回,无需等待函数运行结束,具体的结果通过 Callback 或者 MQ 通知等事件来通知调用者,即事件驱动 (Event Driven)。 + +同步函数和异步函数分别都有不同的运行时来实现: + ++ 同步函数方面,Knative Serving 是一个非常优秀的同步函数运行时,具备了强大的自动伸缩能力。除了 Knative Serving 之外,还可以选择基于 KEDA [http-add-on](https://github.com/kedacore/http-add-on) 配合 Kubernetes 原生的 Deployment 来实现同步函数运行时。这种组合方法可以摆脱对 Knative Serving 依赖。 ++ 异步函数方面,可以结合 [KEDA](https://keda.sh/) 和 [Dapr](https://dapr.io/) 来实现。KEDA 可以根据事件源的监控指标来自动伸缩 Deployment 的副本数量;Dapr 提供了函数访问 MQ 等中间件的能力。 + +Knative 和 KEDA 在自动伸缩方面的能力不尽相同,下面我们将展开分析。 + +#### Knative 自动伸缩 + +Knative Serving 有 3 个主要组件:Autoscaler、Serverless 和 Activator。`Autoscaler` 会获取工作负载的 Metric(比如并发量),如果现在的并发量是 0,就会将 Deployment 的副本数收缩为 0。但副本数缩为 0 之后函数就无法调用了,所以 Knative 在副本数缩为 0 之前会把函数的调用入口指向 `Activator`。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021128674.png) + +当有新的流量进入时,会先进入 Activator,Activator 接收到流量后会通知 Autoscaler,然后 Autoscaler 将 Deployment 的副本数扩展到 1,最后 Activator 会将流量转发到实际的 Pod 中,从而实现服务调用。这个过程也叫**冷启动**。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021142327.png) + +由此可知,Knative 只能依赖 Restful HTTP 的流量指标进行自动伸缩,但现实场景中还有很多其他指标可以作为自动伸缩的依据,比如 Kafka 消费的消息积压,如果消息积压数量过多,就需要更多的副本来处理消息。要想根据更多类型的指标来自动伸缩,我们可以通过 KEDA 来实现。 + +#### KEDA 自动伸缩 + +KEDA 需要和 Kubernetes 的 HPA 相互配合来达到更高级的自动伸缩的能力,HPA 只能实现从 1 到 N 之间的自动伸缩,而 KEDA 可以实现从 0 到 1 之间的自动伸缩,将 KEDA 和 HPA 结合就可以实现从 
0 到 N 的自动伸缩。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021151799.png) + +KEDA 可以根据很多类型的指标来进行自动伸缩,这些指标可以分为这么几类: + ++ 云服务的基础指标,比如 AWS 和 Azure 的相关指标; ++ Linux 系统相关指标,比如 CPU、内存; ++ 开源组件特定协议的指标,比如 Kafka、MySQL、Redis、Prometheus。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021159588.png) + +例如要根据 Kafka 的指标进行自动伸缩,就需要这样一个配置清单: + +```yaml +apiVersion: keda.k8s.io/v1alpha1 +kind: ScaledObject +metadata: + name: kafka-scaledobject + namespace: default + labels: + deploymentName: kafka-consumer-deployment # Required Name of the deployment we want to scale. +spec: + scaleTargetRef: + deploymentName: kafka-consumer-deployment # Required Name of the deployment we want to scale. + pollingInterval: 15 + minReplicaCount: 0 + maxReplicaCount: 10 + cooldownPeriod: 30 + triggers: + - type: kafka + metadata: + topic: logs + bootstrapServers: kafka-logs-receiver-kafka-brokers.default.svc.cluster.local + consumerGroup: log-handler + lagThreshold: "10" +``` + +副本伸缩的范围在 0~10 之间,每 15 秒检查一次 Metrics,进行一次扩容之后需要等待 30 秒再决定是否进行伸缩。 + +同时还定义了一个触发器,即 Kafka 服务器的 “logs” topic。消息堆积阈值为 10,即当消息数量超过 10 时,logs-handler 的实例数量就会增加。如果没有消息堆积,就会将实例数量减为 0。 + +这种基于组件特有协议的指标进行自动伸缩的方式比基于 HTTP 的流量指标进行伸缩的方式更加合理,也更加灵活。 + +虽然 KEDA 不支持基于 HTTP 流量指标进行自动伸缩,但可以借助 KEDA 的 [http-add-on](https://github.com/kedacore/http-add-on) 来实现,该插件目前还是 Beta 状态,我们会持续关注该项目,等到它足够成熟之后就可以作为同步函数的运行时来替代 Knative Serving。 + +#### Dapr + +现在的应用基本上都是分布式的,每个应用的能力都不尽相同,为了将不同应用的通用能力给抽象出来,微软开发了一个分布式应用运行时,即 Dapr (Distributed Application Runtime)。Dapr 将应用的通用能力抽象成了**组件**,不同的**组件**负责不同的功能,例如服务之间的调用、状态管理、针对输入输出的资源绑定、可观测性等等。这些分布式组件都使用同一种 API 暴露给各个编程语言进行调用。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021310152.png) + +函数计算也是分布式应用的一种,会用到各种各样的编程语言,以 Kafka 为例,如果函数想要和 Kafka 通信,Go 语言就得使用 Go SDK,Java 语言得用 Java SDK,等等。你用几种语言去访问 Kafka,就得写几种不同的实现,非常麻烦。 + +再假设除了 Kafka 之外还要访问很多不同的 MQ 组件,那就会更麻烦,用 5 种语言对接 10 个 MQ(Message Queue) 就需要 **50 种实现**。使用了 Dapr 之后,10 个 MQ 会被抽象成一种方式,即 HTTP/GRPC 对接,这样就只需 **5 
种实现**,大大减轻了开发分布式应用的工作量。 + +由此可见,Dapr 非常适合应用于函数计算平台。 + +## 新一代开源函数计算平台 OpenFunction + +结合上面讨论的所有技术,就诞生了 [OpenFunction](https://github.com/OpenFunction/OpenFunction/) 这样一个开源项目,它的架构如图所示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021428054.png) + +主要包含 4 个组件: + ++ **Function** : 将函数转换为应用; ++ **Build** : 通过 Shipwright 选择不同的镜像构建工具,最终将应用构建为容器镜像; ++ **Serving** : 通过 Serving CRD 将应用部署到不同的运行时中,可以选择同步运行时或异步运行时。同步运行时可以通过 Knative Serving 或者 KEDA-HTTP 来支持,异步运行时通过 Dapr+KEDA 来支持。 ++ **Events** : 对于事件驱动型函数来说,需要提供事件管理的能力。由于 Knative 事件管理过于复杂,所以我们研发了一个新型事件管理驱动叫 **OpenFunction Events**。 + + ![](https://pek3b.qingstor.com/kubesphere-community/images/202112021436924.png) + + OpenFunction Events 借鉴了 Argo Events 的部分设计,并引入了 Dapr。整体架构分为 3 个部分: + + + **EventSource** : 用于对接多种多样的事件源,通过异步函数来实现,可以根据事件源的指标自动伸缩,使事件的消费更加具有弹性。 + + **EventBus** : `EventBus` 利用 Dapr 的能力解耦了 EventBus 与底层具体 Message Broker 的绑定,你可以对接各种各样的 MQ。`EventSource` 消费事件之后有两种处理方式,一种是直接调用同步函数,然后等待同步函数返回结果;另一种方式是将其写入 `EventBus`,EventBus 接收到事件后会直接触发一个异步函数。 + + **Trigger** : Trigger 会通过各种表达式对 `EventBus` 里面的各种事件进行筛选,筛选完成后会写入 `EventBus`,触发另外一个异步函数。 + +关于 OpenFunction 的实际使用案例可以参考这篇文章:[以 Serverless 的方式用 OpenFunction 异步函数实现日志告警](https://kubesphere.com.cn/blogs/serverless-way-for-kubernetes-log-alert/)。 + +## OpenFunction Roadmap + +![](https://pek3b.qingstor.com/kubesphere-community/images/202112021742729.png) + +OpenFunction 的第一个版本于今年 5 月份发布,从 v0.2.0 开始支持异步函数,v0.3.1 开始新增了 OpenFunction Events,并支持了 Shipwright,v0.4.0 新增了 CLI。 + +**后续我们还会引入可视化界面,支持更多的 EventSource,支持对边缘负载的处理能力,通过 WebAssembly 作为更加轻量的运行时,结合 Rust 函数来加速冷启动速度。** + +## 加入 OpenFunction 社区 + +期待感兴趣的开发者加入 [OpenFunction 社区](https://github.com/OpenFunction)。可以提出任何你对 OpenFunction 的疑问、设计提案与合作提议。 + +可加群主微信:kubesphere,备注进 OpenFunction 交流群。 + +您可以在这里找到 OpenFunction 的一些典型使用案例: + +- [以 Serverless 的方式实现 Kubernetes 日志告警](https://kubesphere.com.cn/blogs/serverless-way-for-kubernetes-log-alert/) +- [OpenFunction Serverless Samples](https://github.com/OpenFunction/samples) +- 
[OpenFunction Events Samples](https://github.com/OpenFunction/OpenFunction/blob/main/docs/concepts/OpenFunction-events-framework.md) +- [OpenFunction 官网](https://openfunction.dev/) diff --git a/content/zh/blogs/how-to-use-kubesphere-project-gateways-and-routes.md b/content/zh/blogs/how-to-use-kubesphere-project-gateways-and-routes.md new file mode 100644 index 000000000..d45cc5335 --- /dev/null +++ b/content/zh/blogs/how-to-use-kubesphere-project-gateways-and-routes.md @@ -0,0 +1,111 @@ +--- +title: '深入浅出 Kubernetes 项目网关与应用路由' +tag: 'KubeSphere, Kubernetes' +keywords: 'KubeSphere, Kubernetes, Gateway, 网关, Spring Cloud' +description: '本篇内容简述了应用路由的基本架构,并与 Kubernetes Service 及其他应用网关分别做了对比。最后通过 SockShop 这个案例讲解的应用路由的配置方法。' +createTime: '2021-07-28' +author: '马岩' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202109071715557.png' +--- + +KubeSphere 项目网关与应用路由提供了一种聚合服务的方式,将集群的内部服务通过一个外部可访问的 IP 地址以 HTTP 或 HTTPs 暴露给集群外部。应用路由定义了这些服务的访问规则,用户可以定义基于 host 主机名称和 URL 匹配的规则。同时还可以配置 HTTPs offloading 等选项。项目网关则是应用路由的具体实现,它承载了流量的入口并根据应用路由规则将匹配到的请求转发至集群内的服务。 + +## 整体架构 + +用户的服务和应用路由的架构密不可分,因此我们需要结合用户服务来理解项目网关的整体架构。一个典型生产环境中,项目网关架构如下图所示: + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370451-193428-kubernetes-ingress.png) + +图中组件共分为四个部分: + +1. `Nginx Ingress Controller` 是应用网关的核心组件。KubeSphere 项目网关基于 `Nginx Ingress Controller` 实现,它通过获 `Ingress` 对象生成 Nginx 反向代理规则配置并配置应用于 Nginx 服务。应用路由是一个 `Ingress` 对象。应用网关依赖于 `Service` 对外暴露 Nginx 服务,因此 `Service` 在生产环境中一般设置为 `LoadBalancer` 类型,由云服务商配置其公有云 IP 地址及外部负载均衡器,用以保障服务的高可用性。 +2. 外部负载均衡器,应用网关的 `Service` 生成的外部负载均衡器,一般由各个云服务商提供。因此每种负载均衡器的特性有很多差别,比如 SLA、带宽、IP 配置等等。我们一般可以通过服务商提供的注解对其进行配置,在设置网关时,我们通常需要了解这些特性。 +3. DNS 域名解析服务, 一般由域名服务商提供服务,我们可以配置域名解析纪录将域名指向 `LoadBalancer` 的公网 IP。如果子域名也指向同一 IP,我们可以可使用泛域名解析方式。 +4. 
用户服务与应用路由,用户需要为应用程序创建 `Service` 用于暴露集群内的服务,然后创建应用路由对外暴露服务。注,`Nginx Ingress Controller` 并不通过 `Kube-proxy` 访问服务 IP。它通过服务查找与之关联 `POD` 的 `EndPoint`,并将其设置为 `Nginx` 的 `Upstream`。Nginx 直接连接 `POD` 可以避免由 `Service` 带来的额外网络开销。
+
+### 应用路由 vs Service(type=LoadBalancer)
+
+在实践过程中,应用路由与 `Service` 的应用场景常常令人混淆。它们都可以向集群外暴露集群内服务,并提供负载均衡功能。并且应用路由看起来也是*依赖*于服务的,那么它们究竟有何区别呢?这个问题我们需要从以下几个角度理解。
+
+1. `Service` 最初的设计动机是将某个服务的后端(Pod)进行抽象并公开为网络服务。它通常是以一个服务为单位的,所有后端均运行相同的服务端。而`应用路由`的设计目标是对 API 对象进行管理。它虽然也可以暴露一个服务,但是它更强大的功能在于其可以将一系列服务进行聚合,对外提供统一的访问 IP、域名、URL 等。
+2. `Service` 工作在 TCP/IP 协议的第四层,因此它使用 `IP+端口+协议` 三元组作为服务的唯一标识。因此当我们需要暴露一个服务时,它不能与其他已存在的服务冲突。例如,我们暴露基于 HTTP/HTTPs 的服务时,通常这类服务都会占用 80、443 端口,为了避免端口冲突,就需要为每个暴露的服务申请一个独立的 IP 地址,导致资源浪费。`应用路由`工作在七层,所有通过应用路由暴露的服务都可以共享项目网关的 IP 地址和 80、443 端口。每个`应用路由`使用 `Host+URL` 作为服务的唯一标识,将 HTTP 请求转发到后端服务中。
+3. `Service` 支持 TCP 与 UDP 协议并且对上层协议没有限制,而应用路由目前只支持 HTTP/HTTPs 或 HTTP2 协议,无法转发基于 TCP 或 UDP 的其他协议。
+
+结合以上三点,我们不难看出:应用路由更适用于使用 HTTP 协议的微服务架构的场景中,而 `Service` 虽然对 HTTP 协议没有深度的支持,但是它可以支持更多其他协议。
+
+### 应用路由 vs Spring Cloud Gateway 或 Ocelot
+
+Java、.net Core 的开发人员对 `Spring Cloud Gateway` 或 `Ocelot` 一定不会感到陌生,它们是各自语言领域中最常用的 API 网关。那么我们是否可以直接使用这些网关呢?理解这个问题,我们首先要知道什么是 API 网关,在 Wiki 百科中 `API Gateway` 并没有一个明确的定义,但我们从各个大厂的服务说明中可以得出一个基本的结论:
+
+> API 网关作为用户与后端服务之间的唯一入口管理后端服务,即 API 网关提供了一个反向代理服务将后端服务进行聚合,将客户端请求路由到后端服务并将结果返回给客户端。同时,API 网关可提供身份认证、监控、负载均衡、HTTPS offloading 等高级功能。
+
+因此,应用路由承担了 API 网关的职责,即它与 `Spring Cloud Gateway` 或 `Ocelot` 等 API 网关具有同等地位。诸如 `Spring Cloud Gateway` 类的 API 网关通过 `Service` 的方式暴露到集群外部也可替代部分应用路由功能。我们接下来做一个简要的对比,并分析一下它们的优缺点:
+
+1. 作为应用网关的基本职责,它们均具有路由转发功能。并且以上提到的网关均支持基于 HOST、URL 的路由转发规则设置。
+2. 服务注册与发现,`Spring Cloud Gateway` 等全家桶式解决方案提供了非常丰富的支持选项,对于 java 开发者更为友好,网关上的服务均可通过注册中心服务无缝衔接。而 Ocelot 虽然未内置服务发现与注册方案,但是可以通过 Ocelot + Consul 的方式实现。对比之下 Kubernetes 集群中部署应用,一般采用基于 DNS 的服务发现方式,但并没有为客户端提供一个统一的服务注册发现方式。对外暴露的服务需要显式地创建 Ingress 规则。相比之下 `Spring Cloud Gateway` 类的 API 网关使用相同技术栈,这可以极大的简化开发人员的学习成本。
+3. 
通用性上,Ingress 是云原生背景下 Kubernetes 社区定义的 API 管理规范。KubeSphere 默认采用 `Nginx Ingress Controller`实现。同时我们可以使用任何兼容的第三方 Ingress 控制器进行替换。Ingress 中只定义了基本共性的功能,但网关通常会提供日志、监控、安全等更多通用的运维工具。相比之下,与语言紧密结合的 API 网关通常与开发平台进行绑定,语言相互替代性较差(不愿引入更多技术栈或无客户端集成支持)。功能相对固定,但大多提供了良好的插件机制,开发人员使用自己熟悉的语言进行拓展。 +4. 性能方面,毋庸置疑,以基于 Nginx 的 Ingress Controller 为代表的通用型 API 网关,比 `Spring Cloud Gateway`、`Ocelot` 等有非常明显的性能优势。 + +总体来讲,每种网关都有其优缺点或局限性。在项目初期应首先考虑应用网关的架构。在基于云原生的场景下,应用路由会是一个不错的选择。而如果您的团队依赖于开发技术栈,那么常用技术栈中的 API 网关通常也会作为首选。但这并不意味着它们必须进行二选一,在一些复杂场景下我们可以结合二者的优势,开发人员使用自己熟知的 API 网关用于服务聚合、认证鉴权等功能,同时在其前方放置应用网关实现日志监控,负载均衡,HTTPs offloading 等工作。 + +微软官方微服务架构示例 [eShopOnContainers](https://docs.microsoft.com/en-us/dotnet/architecture/cloud-native/introduce-eshoponcontainers-reference-app "eShopOnContainers") 即采用了该种混合架构。 + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370654-571190-eshoponcontainers-architecture-aggregator-services.png) + +## 动手实战 + +理解以上应用场景和整体架构后,我们接下来演示如何在 KubeSphere 中配置项目网关和应用路由。以下内容将基于 Weaveworks 的微服务演示项目 SockShop 实现。SockShop 是一个典型的前后端分离架构,它由前端服务 `front-end` 和若干后端服务 `catalogue`、`carts`、`orders` 等组成。在当前架构下,`front-end` 除了承担静态页面服务的功能,还承担了后端 API 代理转发的任务。我们假设以下场景,即由 Nodejs 转发 API 造成服务异步阻塞,从而影响页面性能。因此我们决定使用 ingress 直接转发服务 `catalogue` 用以提升性能。下面我们看一下详细配置步骤。 + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370560-468146-socksshop.png) + +### 准备工作 + +1. 
在部署 SockShop 之前,我们首先要配置一个用于演示的企业空间 `workspace-demo` 和项目 `sock-shop`。具体步骤请参考[《创建企业空间、项目、用户和角色》](https://kubesphere.com.cn/docs/quick-start/create-workspace-and-project/ "《创建企业空间、项目、用户和角色》") + +2) 完成项目 `sock-shop` 的创建后,我们接下来使用 `kubectl` 部署 SockShop 的相关服务。您可以使用本地的控制台或 KubeSphere web 工具箱中的 `kubectl`执行以下命令。 + +``` +kubectl -n sock-shop apply -f https://github.com/microservices-demo/microservices-demo/raw/master/deploy/kubernetes/complete-demo.yaml +``` + +执行过后可以进入 `sock-shop` 的`工作负载`页面查看部署的状态,等待所有的部署都正常运行后,我们再进行下一步操作。 + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371198-6886-workload.png) + +### 项目网关配置 + +1. 进入 `sock-shop` 项目,从左侧导航栏进入项目设置下的高级设置页面,然后点击设置网关。 + +2. 在接下来弹出的对话框中,需要根据 KubeSphere 的安装环境进行设置。如果您使用的是本地开发环境或私有环境可以选择 `NodePort` 的方式暴露网关。如果是托管 Kubernetes 云服务,一般选择 LoadBalancer。 + +### 应用路由配置 + +1. 首先,我们选择左侧导航栏**应用负载**中的**应用路由**,点击右侧的创建。在基本信息中填写名称 `frontend`。在路由规则中,添加一条新的规则。由于是演示项目,我们使用自动生成模式。KubeSphere 自动以<服务名称>.<项目名称>.<网关地址>.nip.io 格式生成域名,该域名由 nip.io 自动解析为网关地址。在路径、服务、端口上依次选择 "/"、"front-end"、"80"。点击**下一步**后,继续点击**创建**。 + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371226-863229-router.png) + +2. 路由创建完成后,可以在应用路由列表页面点击 `frontend` 进入详情。并在规则中可以点击**点击访问**访问按钮。在新的浏览器 tab 下,应该出现如下的网站: + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371245-961841-sockshop.png) + +3. 为了与下面的步骤进行对比,我们在 SockShop 的网站页面打开调试功能查看网络请求,以 Chrome 为例只需点击键盘的**F12**键。刷新一下页面后我们找到如下 `catalogue` API 请求: + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371262-490907-f12.png) + +该请求头中的 `X-Powered-By:Express` 表明了这条请求是由前端的 Nodejs 应用转发。 + +4. 接下来,在 `frontend` 的详情页面点击左侧的**更多操作**,并选择**编辑规则**。在弹出的编辑规则页面,选择刚刚增加的规则,并点击左侧的编辑图标。新增一条路径,在路径、服务、端口上依次选择"/catalogue"、"catalogue"、"80"。保存该设置。编辑后的规则如下: + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371282-336585-router2.png) + +5. 
我们再次访问 SockShop 的网站页面,该页面并没有任何变化。我们使用浏览器调试器,再次查看网络请求,`catalogue` 的请求如下: + +![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371313-315498-f12-after.png) + +我们发现该请求已经没有了 `X-Powered-By:Express` 请求头,这说明了我们上面应用的规则已经生效,`catalogue`相关的 API 请求已经通过应用路由直接转发 `catalogue` 服务了,而不需要再通过 `fron-tend` 服务进行中转。以上的配置我们利用了路由规则的最长匹配规则。“/catalogue”比更路径具有更高的优先级。 + +更多配置内容可以参考[《应用路由》](https://kubesphere.com.cn/docs/project-user-guide/application-workloads/routes/ "《应用路由》") + +## 总结 + +本篇内容简述了应用路由的基本架构,并与 Kubernetes Service 及其他应用网关分别做了对比。最后通过 SockShop 这个案例讲解的应用路由的配置方法。希望读者对应用路由能有进一步的理解,根据应用的特性选择合适的外部服务暴露方式。 \ No newline at end of file diff --git a/content/zh/blogs/kubernetes-client-go-indexer-threadsafestore.md b/content/zh/blogs/kubernetes-client-go-indexer-threadsafestore.md new file mode 100644 index 000000000..d6ce9fd6a --- /dev/null +++ b/content/zh/blogs/kubernetes-client-go-indexer-threadsafestore.md @@ -0,0 +1,378 @@ +--- +title: 'Kubernetes client-go 源码分析 - Indexer & ThreadSafeStore' +tag: 'Kubernetes' +keywords: 'Kubernetes, client-go, Indexer, ThreadSafeStore' +description: 'Indexer 主要依赖于 ThreadSafeStore 实现,是 client-go 提供的一种缓存机制,通过检索本地缓存可以有效降低 apiserver 的压力,本文详细解读了 Indexer 和对应的 ThreadSafeStore 的实现。' +createTime: '2021-10-26' +author: 'Daniel Hu' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubernetes-client-go-cover.png' +--- + +## 概述 + +> 源码版本信息 +> +> - Project: kubernetes +> - Branch: master +> - Last commit id: d25d741c +> - Date: 2021-09-26 + +自定义控制器涉及到的 client-go 组件整体工作流程,大致如下图: + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubernetes-client-go.png) + +Indexer 主要依赖于 ThreadSafeStore 实现,是 client-go 提供的一种缓存机制,通过检索本地缓存可以有效降低 apiserver 的压力,今天我们来详细看下 Indexer 和对应的 ThreadSafeStore 的实现。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/threadsafemap.png) + +## Indexer 接口 + +Indexer 接口主要是在 Store 接口的基础上拓展了对象的检索功能: + +- **client-go/tools/cache/index.go:35** + +```go +type Indexer interface { + Store + Index(indexName string, 
obj interface{}) ([]interface{}, error) // 根据索引名和给定的对象返回符合条件的所有对象 + IndexKeys(indexName, indexedValue string) ([]string, error) // 根据索引名和索引值返回符合条件的所有对象的 key + ListIndexFuncValues(indexName string) []string // 列出索引函数计算出来的所有索引值 + ByIndex(indexName, indexedValue string) ([]interface{}, error) // 根据索引名和索引值返回符合条件的所有对象 + GetIndexers() Indexers // 获取所有的 Indexers,对应 map[string]IndexFunc 类型 + AddIndexers(newIndexers Indexers) error // 这个方法要在数据加入存储前调用,添加更多的索引方法,默认只通过 namespace 检索 +} +``` + +Indexer 的默认实现是 cache: + +```go +type cache struct { + cacheStorage ThreadSafeStore + keyFunc KeyFunc +} +``` + +cache 对应两个方法体实现完全一样的 New 函数: + +```go +func NewStore(keyFunc KeyFunc) Store { + return &cache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + } +} + +func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { + return &cache{ + cacheStorage: NewThreadSafeStore(indexers, Indices{}), + keyFunc: keyFunc, + } +} +``` + +这里涉及到两个类型: + +- KeyFunc +- ThreadSafeStore + +我们先看一下 Indexer 的 `Add()`、`Update()` 等方法是怎么实现的: + +```go +func (c *cache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, obj) + return nil +} + +func (c *cache) Update(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Update(key, obj) + return nil +} +``` + +可以看到这里的逻辑就是调用 `keyFunc()` 方法获取 key,然后调用 `cacheStorage.Xxx()` 方法完成对应增删改查过程。KeyFunc 类型是这样定义的: + +```go +type KeyFunc func(obj interface{}) (string, error) +``` + +也就是给一个对象,返回一个字符串类型的 key。KeyFunc 的一个默认实现如下: + +```go +func MetaNamespaceKeyFunc(obj interface{}) (string, error) { + if key, ok := obj.(ExplicitKey); ok { + return string(key), nil + } + meta, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("object has no meta: %v", err) + } + if len(meta.GetNamespace()) > 0 { + return meta.GetNamespace() + "/" + meta.GetName(), nil + } + return meta.GetName(), nil +} 
可以看到一般情况下返回值是 `<namespace>/<name>`,如果 namespace 为空则直接返回 name。
+![](https://pek3b.qingstor.com/kubesphere-community/images/threadsafemap.png) + +## threadSafeMap.Xxx() + +比如 `Add()` 方法代码如下: + +```go +func (c *threadSafeMap) Add(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] // c.items 是 map[string]interface{} 类型 + c.items[key] = obj // 在 items map 里添加这个对象 + c.updateIndices(oldObject, obj, key) // 下面分析 +} +``` +可以看到更复杂的逻辑在 updateIndices 方法里,我们继续来看: + +- **client-go/tools/cache/thread_safe_store.go:256** + +```go +func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { + // 添加场景这里是 nil,如果是更新,就需要删除旧对象的索引了 + if oldObj != nil { + c.deleteFromIndices(oldObj, key) // 删除操作后面具体看 + } + for name, indexFunc := range c.indexers { // 从 Indexers 里拿到索引函数,比如 "namespace":MetaNamespaceIndexFunc + indexValues, err := indexFunc(newObj) // 通过 MetaNamespaceIndexFunc 计算得到 namespace,比如 "default" + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + index := c.indices[name] // 拿到一个 Index,对应类型 map[string]sets.String + if index == nil { + index = Index{} + c.indices[name] = index // 如果 map 不存在则初始化一个 + } + + for _, indexValue := range indexValues { // "default" + set := index[indexValue] // 检索 "default" 下的 set,对应一个集合,多个 pod 信息 + if set == nil { + set = sets.String{} + index[indexValue] = set // 如果为空则初始化一个 + } + set.Insert(key) // key 也就是类似 "default/pod_1" 这样的字符串,保存到 set 里,也就完成了 key + obj 的 Add 过程 + } + } +} +``` + +上面还提到了一个 *deleteFromIndices* 方法,前半段和上面逻辑上类似的,最后拿到 set 后不同于上面的 Insert 过程,这里调用了一个 Delete。 + +```go +func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { + for name, indexFunc := range c.indexers { + indexValues, err := indexFunc(obj) + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + + index := c.indices[name] + if index == nil { + continue + } + for _, indexValue := range indexValues { + set := 
index[indexValue] + if set != nil { + set.Delete(key) // set 中删除这个 key + if len(set) == 0 { + delete(index, indexValue) + } + } + } + } +} +``` + +## Index() 等实现 + +最后看几个具体方法等实现 + +### Index() 方法 + +来看一下 `Index()` 方法的实现,`Index()` 方法的作用是给定一个 obj 和 indexName,比如 pod1和 "namespace",然后返回 pod1 所在 namespace 下的所有 pod。 + +- **client-go/tools/cache/thread_safe_store.go:141** + +```go +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] // 提取索引函数,比如通过 "namespace" 提取到 MetaNamespaceIndexFunc + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) // 对象丢进去拿到索引值,比如 "default" + if err != nil { + return nil, err + } + index := c.indices[indexName] // indexName 例如 "namespace",这里可以查到 Index + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // 多数情况对应索引值为1到场景,比如用 namespace 时,值就是唯一的 + storeKeySet = index[indexedValues[0]] + } else { + // 对应不为1场景 + storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + list := make([]interface{}, 0, storeKeySet.Len()) + // storeKey 也就是 "default/pod_1" 这种字符串,通过其就可以到 items map 里提取需要的 obj 了 + for storeKey := range storeKeySet { + list = append(list, c.items[storeKey]) + } + return list, nil +} +``` + +### `ByIndex()` 方法 + +相比 `Index()`,这个函数要简单的多,直接传递 indexedValue,也就不需要通过 obj 去计算 key 了,例如 indexName == namespace & indexValue == default 就是直接检索 default 下的资源对象。 + +```go +func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexedValue] + list := make([]interface{}, 0, set.Len()) + 
for key := range set { + list = append(list, c.items[key]) + } + + return list, nil +} +``` + +### IndexKeys() 方法 + +和上面返回 obj 列表不同,这里只返回 key 列表,就是 []string{"default/pod_1"} 这种数据 + +```go +func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexedValue] + return set.List(), nil +} +``` + +### Replace() 方法 + +`Replace()` 的实现简单粗暴,给一个新 items map,直接替换到 threadSafeMap.items 中,然后重建索引。 + +```go +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.indices = Indices{} + for key, item := range c.items { + c.updateIndices(nil, item, key) + } +} +``` + diff --git a/content/zh/blogs/kubernetes-client-go-workqueue.md b/content/zh/blogs/kubernetes-client-go-workqueue.md new file mode 100644 index 000000000..868273541 --- /dev/null +++ b/content/zh/blogs/kubernetes-client-go-workqueue.md @@ -0,0 +1,662 @@ +--- +title: 'Kubernetes client-go 源码分析 - workqueue' +tag: 'Kubernetes' +keywords: 'Kubernetes, client-go, workqueue' +description: '本文详细研究了 Kubernetes client-go workqueue 的相关代码。' +createTime: '2021-10-12' +author: 'Daniel Hu' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubernetes-client-go-cover.png' +--- + + +## 概述 + +>源码版本信息 +> +>- Project: kubernetes +>- Branch: master +>- Last commit id: d25d741c +>- Date: 2021-09-26 + +自定义控制器涉及到的 client-go 组件整体工作流程,大致如下图: + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubernetes-client-go.png) + +今天我们来详细研究下 workqueue 相关代码。client-go 的 util/workqueue 包里主要有三个队列,分别是普通队列,延时队列,限速队列,后一个队列以前一个队列的实现为基础,层层添加新功能,我们按照 Queue、DelayingQueue、RateLimitingQueue 的顺序层层拨开来看限速队列是如何实现的。 + +## Queue +### 接口和结构体 +先看接口定义: + +- 
k8s.io/client-go/util/workqueue/queue.go:26 + +```go +type Interface interface { + Add(item interface{}) // 添加一个元素 + Len() int // 元素个数 + Get() (item interface{}, shutdown bool) // 获取一个元素,第二个返回值和 channel 类似,标记队列是否关闭了 + Done(item interface{}) // 标记一个元素已经处理完 + ShutDown() // 关闭队列 + ShuttingDown() bool // 是否正在关闭 +} +``` + +这个基础的队列接口定义很清晰,我们继续来看其实现的类型: + +```go +type Type struct { + queue []t // 定义元素的处理顺序,里面所有元素都应该在 dirty set 中有,而不能出现在 processing set 中 + dirty set // 标记所有需要被处理的元素 + processing set // 当前正在被处理的元素,当处理完后需要检查该元素是否在 dirty set 中,如果有则添加到 queue 里 + + cond *sync.Cond // 条件锁 + shuttingDown bool // 是否正在关闭 + metrics queueMetrics + unfinishedWorkUpdatePeriod time.Duration + clock clock.Clock +} +``` + +Queue 的工作逻辑大致是这样,里面的三个属性 queue、dirty、processing 都保存 items,但是含义有所不同: + +- queue:这是一个 []t 类型,也就是一个切片,因为其有序,所以这里当作一个列表来存储 item 的处理顺序。 +- dirty:这是一个 set 类型,也就是一个集合,这个集合存储的是所有需要处理的 item,这些 item 也会保存在 queue 中,但是 set 里是无需的,set 的特性是唯一。 +- processing:这也是一个 set,存放的是当前正在处理的 item,也就是说这个 item 来自 queue 出队的元素,同时这个元素会被从 dirty 中删除。 + +下面分别介绍 set 类型和 Queue 接口的集合核心方法的实现。 + +### set +上面提到的 dirty 和 processing 字段都是 set 类型,set 相关定义如下: + +```go +type empty struct{} +type t interface{} +type set map[t]empty + +func (s set) has(item t) bool { + _, exists := s[item] + return exists +} + +func (s set) insert(item t) { + s[item] = empty{} +} + +func (s set) delete(item t) { + delete(s, item) +} +``` + +set 是一个空接口到空结构体的 map,也就是实现了一个集合的功能,集合元素是 `interface{}` 类型,也就是可以存储任意类型。而 map 的 value 是 `struct{}` 类型,也就是空。这里利用 map 的 key 唯一的特性实现了一个集合类型,附带三个方法 `has()`、`insert()`、`delete()` 来实现集合相关操作。 + +### `Add()` +`Add()` 方法用于标记一个 item 需要被处理,代码如下: + +```go +func (q *Type) Add(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if q.shuttingDown { // 如果 queue 正在被关闭,则返回 + return + } + if q.dirty.has(item) { // 如果 dirty set 中已经有了该 item,则返回 + return + } + + q.metrics.add(item) + + q.dirty.insert(item) // 添加到 dirty set 中 + if q.processing.has(item) { // 如果正在被处理,则返回 + return + } + + q.queue = append(q.queue, 
item) // 如果没有正在处理,则加到 q.queue 中 + q.cond.Signal() // 通知某个 getter 有新 item 到来 +} +``` + +### `Get()` + +```go +func (q *Type) Get() (item interface{}, shutdown bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + for len(q.queue) == 0 && !q.shuttingDown { // 如果 q.queue 为空,并且没有正在关闭,则等待下一个 item 的到来 + q.cond.Wait() + } + if len(q.queue) == 0 { // 这时候如果 q.queue 长度还是 0,说明 q.shuttingDown 为 true,所以直接返回 + return nil, true + } + + item, q.queue = q.queue[0], q.queue[1:] // 获取 q.queue 第一个元素,同时更新 q.queue + + q.metrics.get(item) + + q.processing.insert(item) // 刚才获取到的 q.queue 第一个元素放到 processing set 中 + q.dirty.delete(item) // dirty set 中删除该元素 + + return item, false // 返回 item +} +``` + +### `Done()` + +```go +func (q *Type) Done(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.metrics.done(item) + + q.processing.delete(item) // processing set 中删除该 item + if q.dirty.has(item) { // 如果 dirty 中还有,说明还需要再次处理,放到 q.queue 中 + q.queue = append(q.queue, item) + q.cond.Signal() // 通知某个 getter 有新的 item + } +} +``` + +## DelayingQueue + +### 接口和结构体 + +还是先看接口定义: + +- k8s.io/client-go/util/workqueue/delaying_queue.go:30 + +```go +type DelayingInterface interface { + Interface + // AddAfter adds an item to the workqueue after the indicated duration has passed + AddAfter(item interface{}, duration time.Duration) +} +``` + +相比 Queue 这里只是多了一个 AddAfter(item interface{}, duration time.Duration) 方法,望文生义,也就是延时添加 item。 + +结构体定义: + +```go +type delayingType struct { + Interface // 用来嵌套普通 Queue + clock clock.Clock // 计时器 + stopCh chan struct{} + stopOnce sync.Once // 用来确保 ShutDown() 方法只执行一次 + heartbeat clock.Ticker // 默认10s的心跳,后面用在一个大循环里,避免没有新 item 时一直卡住 + waitingForAddCh chan *waitFor // 传递 waitFor 的 channel,默认大小 1000 + metrics retryMetrics +} +``` + +对于延时队列,我们关注的入口方法肯定就是新增的 `AddAfter()` 了,看这个方法的具体的逻辑前我们先看下上面提到的 waitFor 类型。 + +### `waitFor` + +先看下 `waitFor` 结构定义,代码如下: + +```go +type waitFor struct { + data t // 准备添加到队列中的数据 + readyAt time.Time // 应该被加入队列的时间 + index int // 在 heap 中的索引 
+} +``` + +然后可以注意到有这样一行代码: + +```go +type waitForPriorityQueue []*waitFor +``` + +这里定义了一个 `waitFor` 的优先级队列,用最小堆的方式来实现,这个类型实现了 heap.Interface 接口,我们具体看下源码: + +```go +// 添加一个 item 到队列中 +func (pq *waitForPriorityQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*waitFor) + item.index = n + *pq = append(*pq, item) // 添加到队列的尾巴 +} + +// 从队列尾巴移除一个 item +func (pq *waitForPriorityQueue) Pop() interface{} { + n := len(*pq) + item := (*pq)[n-1] + item.index = -1 + *pq = (*pq)[0:(n - 1)] + return item +} + +// 获取队列第一个 item +func (pq waitForPriorityQueue) Peek() interface{} { + return pq[0] +} +``` + +### NewDelayingQueue + +接着看一下 DelayingQueue 相关的几个 New 函数,理解了这里的逻辑,才能继续往后面分析 `AddAfter()` 方法。 + +```go +// 这里可以传递一个名字 +func NewNamedDelayingQueue(name string) DelayingInterface { + return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) +} + +// 上面一个函数只是调用当前函数,附带一个名字,这里加了一个指定 clock 的能力 +func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface { + return newDelayingQueue(clock, NewNamed(name), name) // 注意这里的 NewNamed() 函数 +} + +func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType { + ret := &delayingType{ + Interface: q, + clock: clock, + heartbeat: clock.NewTicker(maxWait), // 10s 一次心跳 + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), + } + + go ret.waitingLoop() // 留意这里的函数调用 + return ret +} +``` + +上面涉及到两个细节: + +- NewNamed(name) +- go ret.waitingLoop() + +`NewNamed()` 函数用于创建一个前面提到的 Queue 的对应类型 Type 对象,这个值被传递给了 `newDelayingQueue()` 函数,进而赋值给了 `delayingType{}` 对象的 Interface 字段,于是后面 delayingType 类型才能直接调用 Type 类型实现的方法。 + +```go +func NewNamed(name string) *Type { + rc := clock.RealClock{} + return newQueue( + rc, + globalMetricsFactory.newQueueMetrics(name, rc), + defaultUnfinishedWorkUpdatePeriod, + ) +} +``` + +`waitingLoop()` 方法逻辑不少,我们单独放到下面一个小节。 + +## `waitingLoop()` + +这个方法是实现延时队列的核心逻辑所在: + +```go +func (q *delayingType) waitingLoop() { + defer 
utilruntime.HandleCrash() + // 队列里没有 item 时实现等待用的 + never := make(<-chan time.Time) + var nextReadyAtTimer clock.Timer + // 构造一个有序队列 + waitingForQueue := &waitForPriorityQueue{} + heap.Init(waitingForQueue) // 这一行其实是多余的,等下提个 pr 给它删掉 + + // 这个 map 用来处理重复添加逻辑的,下面会讲到 + waitingEntryByData := map[t]*waitFor{} + // 无限循环 + for { + // 这个地方 Interface 是多余的,等下也提个 pr 把它删掉吧 + if q.Interface.ShuttingDown() { + return + } + + now := q.clock.Now() + // 队列里有 item 就开始循环 + for waitingForQueue.Len() > 0 { + // 获取第一个 item + entry := waitingForQueue.Peek().(*waitFor) + // 时间还没到,先不处理 + if entry.readyAt.After(now) { + break + } + // 时间到了,pop 出第一个元素;注意 waitingForQueue.Pop() 是最后一个 item,heap.Pop() 是第一个元素 + entry = heap.Pop(waitingForQueue).(*waitFor) + // 将数据加到延时队列里 + q.Add(entry.data) + // map 里删除已经加到延时队列的 item + delete(waitingEntryByData, entry.data) + } + + // 如果队列中有 item,就用第一个 item 的等待时间初始化计时器,如果为空则一直等待 + nextReadyAt := never + if waitingForQueue.Len() > 0 { + if nextReadyAtTimer != nil { + nextReadyAtTimer.Stop() + } + entry := waitingForQueue.Peek().(*waitFor) + nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) + nextReadyAt = nextReadyAtTimer.C() + } + + select { + case <-q.stopCh: + return + case <-q.heartbeat.C(): // 心跳时间是 10s,到了就继续下一轮循环 + case <-nextReadyAt: // 第一个 item 的等到时间到了,继续下一轮循环 + case waitEntry := <-q.waitingForAddCh: // waitingForAddCh 收到新的 item + // 如果时间没到,就加到优先级队列里,如果时间到了,就直接加到延时队列里 + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + // 下面的逻辑就是将 waitingForAddCh 中的数据处理完 + drained := false + for !drained { + select { + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + insert(waitingForQueue, waitingEntryByData, waitEntry) + } else { + q.Add(waitEntry.data) + } + default: + drained = true + } + } + } + } +} +``` + +上面函数还有一个 `insert()` 调用,我们再来看一下这个插入逻辑: + +```go +func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry 
*waitFor) { + // 这里的主要逻辑是看一个 entry 是否存在,如果已经存在,新的 entry 的 ready 时间更短,就更新时间 + existing, exists := knownEntries[entry.data] + if exists { + if existing.readyAt.After(entry.readyAt) { + existing.readyAt = entry.readyAt // 如果存在就只更新时间 + heap.Fix(q, existing.index) + } + + return + } + // 如果不存在就丢到 q 里,同时在 map 里记录一下,用于查重 + heap.Push(q, entry) + knownEntries[entry.data] = entry +} +``` + +### `AddAfter()` +这个方法的作用是在指定的延时到达之后,在 work queue 中添加一个元素,源码如下: + +```go +func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { + if q.ShuttingDown() { // 已经在关闭中就直接返回 + return + } + + q.metrics.retry() + + if duration <= 0 { // 如果时间到了,就直接添加 + q.Add(item) + return + } + + select { + case <-q.stopCh: + // 构造 waitFor{},丢到 waitingForAddCh + case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + } +} +``` + +## RateLimitingQueue +最后一个 workqueue 就是限速队列,我们继续来看。 + +### 接口和结构体 + +先看接口定义: + +- k8s.io/client-go/util/workqueue/rate_limiting_queue.go:20 + +```go +type RateLimitingInterface interface { + DelayingInterface // 延时队列里内嵌了普通队列,限速队列里内嵌了延时队列 + AddRateLimited(item interface{}) // 限速方式往队列里加入一个元素 + Forget(item interface{}) // 标识一个元素结束重试 + NumRequeues(item interface{}) int // 标识这个元素被处理里多少次了 +} +``` + +然后看下两个 New 函数。 + +```go +func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewDelayingQueue(), + rateLimiter: rateLimiter, + } +} + +func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewNamedDelayingQueue(name), + rateLimiter: rateLimiter, + } +} +``` + +这里的区别就是里面的延时队列有没有指定的名字。注意到这里有一个 RateLimiter 类型,后面要详细讲,另外 rateLimitingType 就是上面接口的具体实现类型了。 + +### RateLimiter +RateLimiter 表示一个限速器,我们看下限速器是什么意思。先看接口定义: + +- k8s.io/client-go/util/workqueue/default_rate_limiters.go:27 + +```go +type RateLimiter interface { + When(item interface{}) time.Duration // 返回一个 item 需要等待的时常 + Forget(item 
interface{}) // 标识一个元素结束重试 + NumRequeues(item interface{}) int // 标识这个元素被处理里多少次了 +} +``` + +这个接口有五个实现,分别叫做: + +- BucketRateLimiter +- ItemExponentialFailureRateLimiter +- ItemFastSlowRateLimiter +- MaxOfRateLimiter +- WithMaxWaitRateLimiter + +下面分别来看: + +- BucketRateLimiter +这个限速器可说的不多,用了 golang 标准库的 golang.org/x/time/rate.Limiter 实现。BucketRateLimiter 实例化的时候比如传递一个 rate.NewLimiter(rate.Limit(10), 100) 进去,表示令牌桶里最多有 100 个令牌,每秒发放 10 个令牌。 + +```go +type BucketRateLimiter struct { + *rate.Limiter +} + +var _ RateLimiter = &BucketRateLimiter{} + +func (r *BucketRateLimiter) When(item interface{}) time.Duration { + return r.Limiter.Reserve().Delay() // 过多久后给当前 item 发放一个令牌 +} + +func (r *BucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *BucketRateLimiter) Forget(item interface{}) { +} +``` + +- ItemExponentialFailureRateLimiter + + Exponential 是指数的意思,从这个限速器的名字大概能猜到是失败次数越多,限速越长而且是指数级增长的一种限速器。 + + 结构体定义如下,属性含义基本可以望文生义。 + +```go +type ItemExponentialFailureRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + baseDelay time.Duration + maxDelay time.Duration +} +``` + +主要逻辑是 `When()` 函数是如何实现的: + +```go +func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + exp := r.failures[item] + r.failures[item] = r.failures[item] + 1 // 失败次数加一 + + // 每调用一次,exp 也就加了1,对应到这里时 2^n 指数爆炸 + backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) + if backoff > math.MaxInt64 { // 如果超过了最大整型,就返回最大延时,不然后面时间转换溢出了 + return r.maxDelay + } + + calculated := time.Duration(backoff) + if calculated > r.maxDelay { // 如果超过最大延时,则返回最大延时 + return r.maxDelay + } + + return calculated +} +``` + +另外两个函数太简单了: + +```go +func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) 
{ + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} +``` + +- ItemFastSlowRateLimiter +快慢限速器,也就是先快后慢,定义一个阈值,超过了就慢慢重试。先看类型定义: + +```go +type ItemFastSlowRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + maxFastAttempts int // 快速重试的次数 + fastDelay time.Duration // 快重试间隔 + slowDelay time.Duration // 慢重试间隔 +} +``` + +同样继续来看具体的方法实现: + +```go +func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + r.failures[item] = r.failures[item] + 1 // 标识重试次数 + 1 + + if r.failures[item] <= r.maxFastAttempts { // 如果快重试次数没有用完,则返回 fastDelay + return r.fastDelay + } + + return r.slowDelay // 反之返回 slowDelay +} + +func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} +``` + +- MaxOfRateLimiter +这个限速器看着有点乐呵人,内部放多个限速器,然后返回限速最狠的一个延时: + +```go +type MaxOfRateLimiter struct { + limiters []RateLimiter +} + +func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { + ret := time.Duration(0) + for _, limiter := range r.limiters { + curr := limiter.When(item) + if curr > ret { + ret = curr + } + } + + return ret +} +``` + +- WithMaxWaitRateLimiter +这个限速器也很简单,就是在其他限速器上包装一个最大延迟的属性,如果到了最大延时,则直接返回: + +```go +type WithMaxWaitRateLimiter struct { + limiter RateLimiter // 其他限速器 + maxDelay time.Duration // 最大延时 +} + +func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { + return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} +} + +func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { + delay := w.limiter.When(item) + if delay > w.maxDelay { + return w.maxDelay // 已经超过了最大延时,直接返回最大延时 + } + + return delay +} +``` + +### 限速队列的实现 
+看完了上面的限速器的概念,限速队列的实现就很简单了: + +```go +func (q *rateLimitingType) AddRateLimited(item interface{}) { + // 内部存了一个延时队列,通过限速器计算出一个等待时间,然后传给延时队列 + q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +} + +func (q *rateLimitingType) NumRequeues(item interface{}) int { + return q.rateLimiter.NumRequeues(item) +} + +func (q *rateLimitingType) Forget(item interface{}) { + q.rateLimiter.Forget(item) +} +``` + + diff --git a/content/zh/blogs/kubernetes-kubesphere-ha.md b/content/zh/blogs/kubernetes-kubesphere-ha.md index 756f69b1e..7ce235b7a 100644 --- a/content/zh/blogs/kubernetes-kubesphere-ha.md +++ b/content/zh/blogs/kubernetes-kubesphere-ha.md @@ -1,6 +1,7 @@ --- title: '手把手从零部署与运营生产级的 Kubernetes 集群与 KubeSphere' -tag: 'Kubernetes,kubesphere' +tag: 'Kubernetes,KubeSphere' +keywords: 'Kubernetes,KubeSphere' createTime: '2020-03-26' author: 'Liu_wt' snapshot: 'https://92uu-blog.oss-cn-beijing.aliyuncs.com/2020-03-25-091655.png' diff --git a/content/zh/blogs/kubesphere-3.2.0-ga-announcement.md b/content/zh/blogs/kubesphere-3.2.0-ga-announcement.md new file mode 100644 index 000000000..124a858c3 --- /dev/null +++ b/content/zh/blogs/kubesphere-3.2.0-ga-announcement.md @@ -0,0 +1,144 @@ +--- +title: 'KubeSphere 3.2.0 发布:带来面向 AI 场景的 GPU 调度与更灵活的网关' +tag: 'KubeSphere,release' +keyword: '社区, 开源, 贡献, KubeSphere, release, AI, GPU' +description: 'KubeSphere 3.2.0 新增了对“GPU 资源调度管理”与 GPU 使用监控的支持,进一步增强了在云原生 AI 场景的使用体验。同时还增强了“多集群管理、多租户管理、可观测性、DevOps、应用商店、微服务治理”等特性,更进一步完善交互设计,并全面提升了用户体验。' +createTime: '2021-11-03' +author: 'KubeSphere' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/v3.2.0-GA-cover.png' +--- + +![](https://pek3b.qingstor.com/kubesphere-community/images/3.2.0GA.png) + +现如今最热门的服务器端技术是什么?答案大概就是**云原生**!KubeSphere 作为一个以 Kubernetes 为内核的云原生分布式操作系统,也是这如火如荼的云原生热潮中的一份子。KubeSphere 持续秉承 100% 开源的承诺,借助于开源社区的力量,迅速走向全球。 + +2021 年 11 月 3 日,KubeSphere 开源社区激动地向大家宣布,KubeSphere 3.2.0 正式发布! 
+ +6 个月前,KubeSphere 3.1.0 带着 **“边缘计算”**、**“计量计费”** 等功能,将 Kubernetes 从云端扩展至边缘,更进一步完善交互设计提升了用户体验。3 个月前,KubeSphere 又发布了 v3.1.1,在部署 KubeSphere 时可以指定 Kubernetes 集群中已有的 Prometheus。 + +今天,KubeSphere 3.2.0 带来了更多令人期待的功能,新增了对 **“GPU 资源调度管理”** 与 GPU 使用监控的支持,进一步增强了在云原生 AI 场景的使用体验。同时还增强了 **“多集群管理、多租户管理、可观测性、DevOps、应用商店、微服务治理”** 等特性,更进一步完善交互设计,并全面提升了用户体验。 + +并且,v3.2.0 得到了来自青云科技之外的更多企业与用户的贡献和参与,无论是功能开发、功能测试、缺陷报告、需求建议、企业最佳实践,还是提供 Bug 修复、国际化翻译、文档贡献,这些来自开源社区的贡献都为 v3.2.0 的发布和推广提供了极大的帮助,我们将在文末予以特别致谢! + +## 解读 KubeSphere 3.2.0 重大更新 + +### GPU 调度与配额管理 + +当前随着人工智能机器学习等领域技术的快速发展,市场上涌现了越来越多 AI 公司对服务器集群中 GPU 资源调度管理的需求,其中监控 GPU 使用情况以及 GPU 资源配额管理等需求在社区的呼声很高,在 KubeSphere 中文论坛收到了[很多 GPU 相关的需求](https://kubesphere.com.cn/forum/?q=gpu "很多 GPU 相关的需求"),KubeSphere 本身是一直支持 GPU 的,现在在 v3.2.0 中会将 GPU 的管理变得更易用。 + +KubeSphere 3.2.0 支持可视化创建 GPU 工作负载,支持调度 GPU 资源的任务,同时还支持对 GPU 资源进行租户级配额管理,可对接 Nvidia GPU 或 vGPU 等方案。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111011325039.png) + +### 增强可观测性 + +随着容器和微服务技术的日益流行,系统之间的调用关系将会越来越复杂,系统中运行的进程数量也会暴增。成千上万个进程跑在分布式系统中,使用传统的监控技术很难追踪这些进程之间的依赖关系和调用路径,这时系统内部的可观测性就显得尤为重要。 + +**可观测性是指通过检测一个系统的输出来测量其内部状态的能力**。如果一个系统的当前状态只能通过输出的信息,即**遥测数据**来估计,那么这个系统就被认为是 "可观测的"。可观测性的三板斧包括 Logging、Tracing 和 Metrics,通过这三板斧收集的数据统称为遥测数据。 + +1. 更强大的自定义监控面板 + +KubeSphere 自 v3.1.0 开始便添加了集群层级的自定义监控,可以选择默认模板、上传模板或自定义模板来生成自定义监控面板。KubeSphere 3.2.0 的默认模板加入了对 `Grafana` 的支持,可以通过指定监控面板 URL 或上传 Grafana 监控面板 JSON 文件来导入 Grafana 监控面板,KubeSphere 会自动将 Grafana 监控面板转换为 KubeSphere 的监控面板。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031339897.png) + +针对 GPU 资源也提供了默认的监控模板,并提供了默认指标,减少了用户自定义创建模板编写 YAML 的配置成本。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031336533.png) + +2. 告警通知与日志 + +- 支持通过 HTTPS 与 Elasticsearch 组件通信。 + +- 继 KubeSphere 3.1 支持邮件、钉钉、企业微信、Webhook 和 Slack 等多通知渠道后,3.2.0 新增支持了对告警通知渠道的配置进行测试验证。 + + ![](https://pek3b.qingstor.com/kubesphere-community/images/202111011347330.png) + +3. 
ETCD 监控面板支持自动为 ETCD Leader 打上 `Leader` 标签。 + +### 多云与多集群管理 + +随着 Kubernetes 在企业中的应用越来越广泛,CNCF 在 2020 年的用户调研中显示有将近 80% 的用户在生产环境运行 2 个以上 Kubernetes 集群。KubeSphere 旨在解决多集群和多云管理的难题,为用户提供统一的控制平面,将应用程序及其副本跨公有云和本地环境分发到多个集群。KubeSphere 还拥有跨集群的可观测性,包括多集群维度的监控、日志、事件和审计日志等。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031148924.png) + +KubeSphere 3.2.0 在跨集群调度层面更进一步,创建跨集群的联邦部署(federatedDeployment) 时,KubeSphere 不仅支持将业务按不同副本数量调度到多个集群,还支持在其详情页指定在多个集群分发的副本总数,以及指定该业务的副本分发到多个集群的任意权重。当用户想要灵活扩展部署并且要将其多副本按不同比例灵活分发到多个集群时,这个功能会非常有用。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031144251.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031147569.png) + +### 运维友好的存储管理 + +持久化存储是企业在生产环境中运行 Kubernetes 最需要关注的能力,稳定可靠的存储为企业的核心数据保驾护航。KubeSphere 3.2.0 的 Console 界面新增了**存储卷管理**功能,管理员可以在**存储类型**(StorageClass)下配置是否允许用户对存储卷进行**克隆**、**快照**和**扩展**等功能,为有状态应用提供更方便的持久化存储运维。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111021538169.png) + +默认情况下,立即绑定 (Immediate) 模式不利于受拓扑结构限制的存储后端,可能会导致 Pod 无法调度。v3.2.0 新增了**延迟绑定** (WaitForFirstConsumer) 模式,该模式可以保证直到 Pod 被调度时才绑定 PVC 和 PV,这样就可以根据 Pod 资源等请求来合理调度。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111021542049.png) + +此前 KubeSphere Console 只支持管理存储卷(PVC),不支持对**存储实例**(PV)资源进行管理。这个功能在 KubeSphere 3.2.0 得以实现,现在用户可以在 Console 界面查看 PV 信息,并对其进行编辑和删除。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111021555864.png) + +用户创建存储卷快照时也可以指定快照类型,即指定 `VolumeSnapshotClass`,这样就可以指定存储后端来创建快照。 + +### 支持集群级别的网关 + +在 KubeSphere 3.1 中只支持项目级别的网关,如果用户的项目过多,势必会造成资源的浪费。而且不同的企业空间中的网关都是相互独立的。 + +KubeSphere 3.2.0 开始支持集群级别的全局网关,所有项目可共用同一个网关,之前已创建的项目网关也不会受到集群网关的影响。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111021611634.png) + +也可以统一纳管所有项目的网关,对其进行集中管理和配置,管理员用户再也不需要切换到不同的企业空间中去配置网关了。由于 K8s 生态中有非常多的 Ingress Controller 可作为网关方案,KubeSphere 3.2.0 将网关后端进行重构后,现在社区任意支持 `v1\ingress` 的 Ingress Controller 都可作为网关方案灵活对接 KubeSphere。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/202111021612464.png) + +### 认证与授权 + +统一的身份管理和完备的鉴权体系,是多租户系统中实现逻辑隔离不可或缺的能力。除了可对接 AD/LDAP、OAuth2 等身份认证系统,KubeSphere 3.2.0 还内置了基于 `OpenID Connect` 的认证服务,可以为其他组件提供身份认证能力。`OpenID Connect` 是一个基于 OAuth 2.0 规范的用户身份认证协议,它足够简单,但同时也提供了大量的功能和安全选项以满足企业级业务需求。 + +### 面向合作伙伴开放的应用商店 + +应用商店与应用全生命周期管理是 KubeSphere 独有的特色,KubeSphere 基于自研并开源的 [OpenPitrix](https://github.com/openpitrix/openpitrix "OpenPitrix") 实现了这两大特性。 + +KubeSphere 3.2.0 新增了 **“动态加载应用商店”** 的功能,合作伙伴可申请将应用的 Helm Chart 集成到 KubeSphere 应用商店,相关的 Pull Request 被合并后,KubeSphere 应用商店即可动态加载应用,不再受到 KubeSphere 版本的限制。KubeSphere 应用商店内置的 Chart 地址为:[https://github.com/kubesphere/helm-charts](https://github.com/kubesphere/helm-charts "https://github.com/kubesphere/helm-charts"),欢迎社区合作伙伴来提交 Helm 应用,比如 Nocalhost 和 Chaos Mesh 已经通过这种方式将 Helm Chart 集成到了 KubeSphere 3.2.0,方便用户一键部署应用至 Kubernetes。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111031129842.png) + +### KubeSphere DevOps 更加独立 + +KubeSphere DevOps 从 v3.2.0 开始,已经逐步发展为独立的项目 [ks-devops](https://github.com/kubesphere/ks-devops "ks-devops"),最终用户可以自由选择任意的 Kubernertes 作为运行环境。目前,ks-devops 的后端部分已经可以通过 Helm Chart 来安装。 + +Jenkins 作为一款用户基数极大、生态丰富的 CI 引擎,我们会让 Jenkins 真正地“扮演”引擎的角色——退入幕后持续为大家提供稳定的流水线功能。本次新增 CRD PipelineRun 来封装流水线的执行记录,减少了大量和 Jenkins 直接交互的 API,提升 CI 流水线的性能。 + +从 v3.2.0 开始,KubeSphere DevOps 新增支持在基于 containerd 的流水线中构建镜像。未来 KubeSphere DevOps 将作为独立项目,支持前后端独立部署并引入 Tekton 与 ArgoCD 等 GitOps 工具,还将集成项目管理与测试管理平台。 + +### 集群部署更灵活 + +对于自建 K8s 集群和已有 K8s 集群的用户,KubeSphere 为用户分别提供 KubeKey 和 ks-installer 两种部署方式。 + +[KubeKey](https://github.com/kubesphere/kubekey "KubeKey") 是 KubeSphere 社区开源的一款高效集群部署工具,运行时默认使用 Docker , 也可对接 `Containerd` `CRI-O` `iSula` 等 CRI 运行时,且 ETCD 集群独立运行,支持与 K8s 分离部署,提高环境部署灵活性。 + +如果您使用 KubeKey 部署 Kubernetes 与 KubeSphere,以下特性也值得关注: + +- 支持到 Kubernetes 最新版本 v1.22.1,并向下兼容 4 个版本,同时 KubeKey 也新增支持部署 K3s 的实验性功能。 +- 支持 Kubernetes 集群证书自动更新 +- 支持 Internal LoadBalancer 高可用部署模式,降低集群部署复杂度 +- 大部分集成的组件如 
Istio、Jaeger、Prometheus Operator、Fluent Bit、KubeEdge、Nginx ingress controller 都已更新至上游较新版本,详见 Release Notes 3.2.0 + +## 用户体验 + +SIG Docs 成员也对 Console 界面的中英文文案进行了全面的重构与优化,使界面文案和术语介绍更加专业准确。并删除了前端的硬编码和串联的 UI 字符串,以更好地支持 Console 界面的本地化和国际化。 + +此外,KubeSphere 社区有多位深度用户参与了对前端的部分功能进行了增强,例如新增支持了对 Harbor 镜像仓库的镜像搜索、添加了对挂载存储卷到 init container 的支持、去除存储卷扩展时工作负载自动重启等特性。 + +参考 [Release Notes 3.2.0](https://github.com/kubesphere/kubesphere/releases/tag/v3.2.0) 了解更多的用户体验优化、功能增强以及 Bug 修复。可通过官方文档两条命令在线安装下载 KubeSphere 3.2.0,离线安装也将在一周左右在社区提供下载。 + +## 致谢 + +以下是参与 KubeSphere 3.2.0 代码与文档贡献的贡献者 GitHub ID,若此名单有遗漏请您与我们联系,排名不分先后。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/v3.2.0-contributors.png) diff --git a/content/zh/blogs/kubesphere-ambassador-certificates.md b/content/zh/blogs/kubesphere-ambassador-certificates.md new file mode 100644 index 000000000..9290bf69d --- /dev/null +++ b/content/zh/blogs/kubesphere-ambassador-certificates.md @@ -0,0 +1,45 @@ +--- +title: '年度 KubeSphere Ambassador 揭晓!快来领取证书!' +tag: 'KubeSphere, 社区' +keyword: '社区, 开源, 贡献, KubeSphere' +description: 'KubeSphere 社区向 2021 年产生的 KubeSphere Ambassador 致谢,并向所有在 KubeSphere 社区分享过实践案例和技术文章的小伙伴致以最诚挚的问候!' +createTime: '2021-11-12' +author: 'KubeSphere' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/ambassador-2021-cover.png' +--- + +2021 年,KubeSphere 社区又收获了逾百位的贡献者,包括代码、中英文文档、技术布道、本地化与国际化等贡献都是我们认可的贡献方式,在此我们对 KubeSphere 社区所有的贡献者与合作伙伴表示衷心的感谢! 
+ +技术布道的贡献方式包括撰写技术博客、用户案例、在社区活动进行公开技术分享等方式,社区设置了 KubeSphere Ambassador 奖项作为激励多次在社区分享过 KubeSphere 落地实践案例与技术文章的成员。 + +此外,KubeSphere 社区今年还成立了用户委员会,每个城市站的站长肩负起了组织线下交流活动,推广和布道 KubeSphere 的责任。10 月 23 日,由 KubeSphere 社区用户委员会杭州站站长尹珉发起和组织的云原生技术交流 Meetup-杭州站,在杭州圆满落下帷幕。关于本次活动的回顾,可以参考本篇文章:[云原生技术交流 Meetup 杭州站(10.23)回放视频 + PPT 分享](https://kubesphere.com.cn/live/meetup-hangzhou1023/)。 + +当然,他们也会采取其他方式比如文章来积极地推广 KubeSphere,帮助更多社区用户熟悉 KubeSphere 的应用场景和最佳实践。 + +从上一次[颁发](https://kubesphere.com.cn/blogs/kubesphere-certificates/)(2021 年 2 月 11 日)到现在,共诞生了 4 位 KubeSphere Ambassador,他们在社区都曾多次通过文章等形式推广和布道 KubeSphere。 + +## 领取证书 + + +| 姓名 | 证书 | +| ---- | ---- | +|Lijie Jiang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/ambassador-jianglijie.png) | +|Min Yin|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/ambassador-yinmin.png) | +|Zhengjun Zhou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/ambassador-zhouzhengjun.png) | +|Shuai Li|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/ambassador-lishuai.png) | + +有很多朋友目前正在或者计划在未来进行类似的分享。所以,KubeSphere 社区开启了 [issue](https://github.com/kubesphere/community/issues/365) 和[表单](https://jinshuju.net/f/Npcqwi)来长期公开征集。 + +如果在 2021 年,您也多次在社区或者互联网平台分享过 KubeSphere 相关的案例实践、技术文章或者相关的视频,但没有获得该证书也并未出现在上述公开名单中,就可以回复 issue 或者填写表单,一经确认,即可为您制作证书和寄送周边礼物。 + +**当然,这是长期征集,所以本文发布后,只要您做了上述动作,即可申请 KubeSphere Ambassador 认证。** + +## 公开致谢 + +KubeSphere 社区向 2021 年产生的 KubeSphere Ambassador 致谢,并向所有在 KubeSphere 社区分享过实践案例和技术文章的小伙伴致以最诚挚的问候! 
+ +## 展望未来 + +分享和交流是社区发展的一大要素,文章则是非常重要的一种分享方式。实践案例和技术文章为更多用户提供了可参考的方向、可学习的知识和经验。另一方面,组织交流活动,也是非常有效的布道方式。 + +KubeSphere 社区欢迎更多的小伙伴,积极地在社区分享原创技术文章;也欢迎更多的用户分享实践案例;同时欢迎更多的小伙伴加入到 KubeSphere 社区用户委员会(加入方式后续会有单独说明),组织更多的线下交流活动,进行云原生技术的推广和布道。 \ No newline at end of file diff --git a/content/zh/blogs/kubesphere-apacheapisix.md b/content/zh/blogs/kubesphere-apacheapisix.md new file mode 100644 index 000000000..a8cf6283c --- /dev/null +++ b/content/zh/blogs/kubesphere-apacheapisix.md @@ -0,0 +1,213 @@ +--- +title: '使用 Apache APISIX 作为 Kubernetes 的 Ingress Controller' +tag: 'Kubernetes, KubeSphere' +keywords: 'Kubernetes, KubeSphere, Apache APISIX, Ingress Controller' +description: '本文描述了如何使用 Apache APISIX 作为 Kubernetes 的 Ingress Controller 及部署过程。' +createTime: '2021-09-02' +author: '张晋涛' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-apacheapisix-cover.png' +--- + +## KubeSphere 介绍 + +KubeSphere 是在 Kubernetes 之上构建的面向云原生应用的系统,完全开源,支持多云与多集群管理,提供全栈的 IT 自动化运维能力,简化企业的 DevOps 工作流。它的架构可以非常方便地使第三方应用与云原生生态组件进行即插即用 (plug-and-play) 的集成。 + +作为全栈的多租户容器平台,KubeSphere 提供了运维友好的向导式操作界面,帮助企业快速构建一个强大和功能丰富的容器云平台。KubeSphere 为用户提供构建企业级 Kubernetes 环境所需的多项功能,例如多云与多集群管理、Kubernetes 资源管理、DevOps、应用生命周期管理、微服务治理(服务网格)、日志查询与收集、服务与网络、多租户管理、监控告警、事件与审计查询、存储管理、访问权限控制、GPU 支持、网络策略、镜像仓库管理以及安全管理等。 + +## Apache APISIX 介绍 + +Apache APISIX 是一款开源的高性能、动态云原生网关,由深圳支流科技有限公司于 2019 年捐赠给 Apache 基金会,当前已经成为 Apache 基金会的顶级开源项目,也是 GitHub 上最活跃的网关项目。Apache APISIX 当前已经覆盖了 API 网关,LB,Kubernetes Ingress,Service Mesh 等多种场景。 + +## 前置条件 + +已将现有 Kubernetes 集群纳入 KubeSphere 管理。 + +## 部署 Apache APISIX 和 Apache APISIX Ingress Controller + +我们可以参考 KubeSphere 的文档启用 KubeSphere 的 [AppStore](https://kubesphere.io/docs/pluggable-components/app-store/),或者使用 Apache APISIX 的 Helm 仓库来进行部署。这里,我们直接使用 Apache APISIX 的 Helm 仓库进行部署。 + +执行以下命令即可添加 Apache APISIX 的 Helm repo,并完成部署。 + +```bash +➜ ~ helm repo add apisix https://charts.apiseven.com +"apisix" has been added to your repositories +➜ ~ helm repo add bitnami 
https://charts.bitnami.com/bitnami +"bitnami" has been added to your repositories +➜ ~ helm repo update +➜ ~ kubectl create ns apisix +namespace/apisix created +➜ ~ helm install apisix apisix/apisix --set gateway.type=NodePort --set ingress-controller.enabled=true --namespace apisix +W0827 18:19:58.504653 294386 warnings.go:70] apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition +NAME: apisix +LAST DEPLOYED: Fri Aug 27 18:20:00 2021 +NAMESPACE: apisix +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +1. Get the application URL by running these commands: + export NODE_PORT=$(kubectl get --namespace apisix -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway) + export NODE_IP=$(kubectl get nodes --namespace apisix -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +``` + +验证是否已经成功部署且运行: + +```bash +➜ ~ kubectl -n apisix get pods +NAME READY STATUS RESTARTS AGE +apisix-77d7545d4d-cvdhs 1/1 Running 0 4m7s +apisix-etcd-0 1/1 Running 0 4m7s +apisix-etcd-1 1/1 Running 0 4m7s +apisix-etcd-2 1/1 Running 0 4m7s +apisix-ingress-controller-74c6b5fbdd-94ngk 1/1 Running 0 4m7s +``` + +可以看到相关的 Pod 均已正常运行。 + +## 部署示例项目 + +我们使用 kennethreitz/httpbin 作为示例项目进行演示。这里也直接在 KubeSphere 中完成部署。 + +选择服务 -- 无状态服务,创建即可。 + +![](https://kubesphere-community.pek3b.qingstor.com/images/028268300f567f03792dc7a06445662e.jpg) + +![](https://kubesphere-community.pek3b.qingstor.com/images/856bdcb7899b5d77c187283ba59a0efd.jpg) + +在 KubeSphere 的服务和负载界面即可看到部署成功,也可以直接在终端下查看是否已经部署成功。 + +```bash +➜ ~ kubectl get pods,svc -l app=httpbin +NAME READY STATUS RESTARTS AGE +pod/httpbin-v1-7d6dc7d5f-5lcmg 1/1 Running 0 48s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/httpbin ClusterIP 10.96.0.5 80/TCP 48s +``` + +## 使用 Apache APISIX 作为网关代理 + +我们先演示如何使用 Apache APISIX 作为网关代理 Kubernetes 集群中的服务。 + +```bash +root@apisix:~$ kubectl -n apisix exec -it 
`kubectl -n apisix get pods -l app.kubernetes.io/name=apisix -o name` -- bash +bash-5.1# curl httpbin.default/get +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.default", + "User-Agent": "curl/7.77.0" + }, + "origin": "10.244.2.9", + "url": "http://httpbin.default/get" +} +``` + +可以看到在 Apache APISIX 的 Pod 内可正常访问示例项目。接下来使用 Apache APISIX 对该示例项目进行代理。 + +这里我们使用 curl 调用 Apache APISIX 的 admin 接口,创建一条路由。将所有 host 头为 httpbin.org 的请求转发给 httpbin.default:80 这个实际的应用服务上。 + +```bash +bash-5.1# curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "uri": "/get", + "host": "httpbin.org", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.default:80": 1 + } + } +}' +{"node":{"key":"\/apisix\/routes\/1","value":{"host":"httpbin.org","update_time":1630060883,"uri":"\/*","create_time":1630060883,"priority":0,"upstream":{"type":"roundrobin","pass_host":"pass","nodes":{"httpbin.default:80":1},"hash_on":"vars","scheme":"http"},"id":"1","status":1}},"action":"set"} +``` + +你会得到类似上面的输出,接下来验证是否代理成功: + +```bash +bash-5.1# curl http://127.0.0.1:9080/get -H "HOST: httpbin.org" +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.77.0", + "X-Forwarded-Host": "httpbin.org" + }, + "origin": "127.0.0.1", + "url": "http://httpbin.org/get" +} +``` + +得到上面的输出,说明已经通过 Apache APISIX 代理了示例项目的流量。接下来我们试试在集群外通过 Apache APISIX 访问示例项目。 + +```bash +root@apisix:~$ kubectl -n apisix get svc -l app.kubernetes.io/name=apisix +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +apisix-admin ClusterIP 10.96.33.97 9180/TCP 22m +apisix-gateway NodePort 10.96.126.83 80:31441/TCP 22m +``` + +在使用 Helm chart 部署的时候,默认会将 Apache APISIX 的端口通过 NodePort 的形式暴露出去。我们使用 Node IP + NodePort 的端口进行访问测试。 + +```bash +root@apisix:~$ curl http://172.18.0.5:31441/get -H "HOST: httpbin.org" +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.76.1", + 
"X-Forwarded-Host": "httpbin.org" + }, + "origin": "10.244.2.1", + "url": "http://httpbin.org/get" +} +``` + +可以看到,在集群外已经可以通过 Apache APISIX 作为网关代理 Kubernetes 集群内的服务了。 + +## 使用 APISIX Ingress Controller 代理服务 + +我们可以直接在 KubeSphere 中添加应用路由(Ingress) ,Apache APISIX Ingress Controller 会自动将路由规则同步至 Apache APISIX 中,完成服务的代理。 + +![](https://kubesphere-community.pek3b.qingstor.com/images/b63268a263e96694fbe1ababd60b7eae.jpg) + +![](https://kubesphere-community.pek3b.qingstor.com/images/78d99df6eb3f3982c6200953464c1656.jpg) + +注意我们添加了 `kubernetes.io/ingress.class: apisix` 的 annotation 配置,用于支持集群内多 ingress-controller 的场景。 + +保存后,可看到如下界面: + +![](https://kubesphere-community.pek3b.qingstor.com/images/9205fa0cb325cdd09805235232f6a6af.jpg) + +在终端下测试是否代理成功: + +```bash +root@apisix:~$ curl http://172.18.0.5:31441/get -H "HOST: http-ing.org" +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "http-ing.org", + "User-Agent": "curl/7.76.1", + "X-Forwarded-Host": "http-ing.org" + }, + "origin": "10.244.2.1", + "url": "http://http-ing.org/get" +} +``` + +可以看到也正常代理了。 + +除了以上方式外,Apache APISIX Ingress Controller 通过 CRD 的方式对 Kubernetes 进行了扩展,你也可以通过发布 ApisixRoute 等这种自定义资源来完成 Kubernetes 中服务的对外暴露。 + +## 总结 + +你可以在 KubeSphere 中使用 Apache APISIX 的官方 Helm 仓库直接部署 Apache APISIX 和 APISIX Ingress Controller 。并且 Apache APISIX 可通过作为网关,或者 APISIX Ingress Controller 的数据面来承载业务流量。 + +## 未来展望 + +Apache APISIX 已经与 KubeSphere 社区达成合作,你可以直接在 KubeSphere 自带的应用仓库中找到 Apache APISIX ,不需要手动添加 Helm 仓库。 diff --git a/content/zh/blogs/kubesphere-argocd.md b/content/zh/blogs/kubesphere-argocd.md index e6b628f27..61ce76161 100644 --- a/content/zh/blogs/kubesphere-argocd.md +++ b/content/zh/blogs/kubesphere-argocd.md @@ -49,7 +49,7 @@ Argo CD 支持的 Kubernetes 配置清单包括 helm charts、kustomize 或纯 Y 源码仓库可参考以下链接,离线环境原因,这里选择第二个示例 spring-demo: -+ [https://github.com/KubeSphere/DevOps-java-sample](https://github.com/KubeSphere/DevOps-java-sample) ++ 
[https://github.com/KubeSphere/devops-maven-sample](https://github.com/KubeSphere/devops-maven-sample) + [https://github.com/willzhang/spring-demo](https://github.com/willzhang/spring-demo) yaml 文件仓库可参考以下链接,这里命名为 argocd-gitops: @@ -447,7 +447,7 @@ Argo CD Image Updater 部署略显繁琐,部署操作如下: 1、在 Argo CD 中创建本地用户 -创建 Argo CD 镜像更新程序需要访问 Argo CD API Server 的凭据,使用一个 image-updater 具有适当 API 权限的帐户,将以下用户定义添加到 argocd-cm: +创建 Argo CD 镜像更新程序需要访问 Argo CD API Server 的凭据,使用一个 image-updater 具有适当 API 权限的用户,将以下用户定义添加到 argocd-cm: ```bash # kubectl -n Argo CD edit cm argocd-cm diff --git a/content/zh/blogs/kubesphere-certificates.md b/content/zh/blogs/kubesphere-certificates.md index 671dd9372..cf3b8d56e 100644 --- a/content/zh/blogs/kubesphere-certificates.md +++ b/content/zh/blogs/kubesphere-certificates.md @@ -1,6 +1,6 @@ --- title: '除夕夜,来 KubeSphere 社区领证吧!' -tag: 'KubeSphere,社区' +tag: 'KubeSphere, 社区' keyword: '社区, 开源, 贡献, KubeSphere' description: 'KubeSphere 社区管理委员会向此次获得 2020 年度评选产生的 KubeSphere Member、 KubeSphere Ambassador、KubeSphere Talented Speaker、KubeSphere Contributor 成员致谢。' createTime: '2021-02-11' diff --git a/content/zh/blogs/kubesphere-contributor-certificates.md b/content/zh/blogs/kubesphere-contributor-certificates.md new file mode 100644 index 000000000..39bb2f265 --- /dev/null +++ b/content/zh/blogs/kubesphere-contributor-certificates.md @@ -0,0 +1,116 @@ +--- +title: '2021 年新晋 KubeSphere Contributor 们,快来领证吧!' +tag: 'KubeSphere, 社区' +keyword: '社区, 开源, 贡献, KubeSphere' +description: 'KubeSphere 社区向此次获得 2021 年新晋的 KubeSphere Contributor 致谢,并向所有在 KubeSphere 中文论坛分享过技术文章的小伙伴致以最诚挚的问候!' +createTime: '2021-11-19' +author: 'KubeSphere' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-contributor-cover.png' +--- + +2021 年,KubeSphere 社区又收获了逾百位的贡献者,包括代码、中英文文档、技术布道、本地化与国际化等贡献都是我们认可的贡献方式,在此我们对 KubeSphere 社区所有的贡献者与合作伙伴表示衷心的感谢! 
+ +KubeSphere 开源贡献的方式,包括在 GitHub 提交 Pull Request 参与应用的开发及 bug 的修复(即我们常说的贡献代码),或者在 KubeSphere 中文论坛发布过优质的技术博客。社区设置了 KubeSphere Contributor 奖项作为激励多次为 KubeSphere 贡献代码的成员。 + +从上一次[颁发](https://kubesphere.com.cn/blogs/kubesphere-certificates/)(2021 年 2 月 11 日)到现在,共诞生了 76 位 KubeSphere Contributor,他们都曾为 KubeSphere 贡献过代码。 + +另外,在 10 月份,作为 Hacktoberfest 活动的项目之一,KubeSphere 也开启了为期一个月的 Hacktoberfest 活动月。只要在 10 月 1 日至 31 日期间(在任何时区)提交的 PR 数量达到 4 个,就可以获得官方限定的 2021 年 Hacktoberfest T恤、KubeSphere 周边礼品和证书。活动开启后,有些社区小伙伴积极参与并达到了要求,他们的证书在本次一并发放。 + +## 领取证书 + +| GitHub ID 或姓名 | 证书 | +| ---- | ---- | +|123liubao|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-123liubao.png) | +|24sama|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-24sama.png) | +|94rain|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-94rain.png) | +|AbdelouahabMbarki|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-AbdelouahabMbarki.png) | +|alabulei1|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-alabulei1.png) | +|alimy|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-alimy.png) | +|andyli029|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-andyli029.png) | +|Bettygogo2021|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-Bettygogo2021.png) | +|caojiele|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-caojiele.png) | +|cndoit18|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-cndoit18.png) | +|cnscottluo|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-cnscottluo.png) | +|cwen0|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-cwen0.png) | +|cycwll|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-cycwll.png) | 
+|daixijun|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-daixijun.png) | +|Dishone|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-Dishone.png) | +|dkeven|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-dkeven.png) | +|dkkb|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-dkkb.png) | +|duguhaotian|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-duguhaotian.png) | +|Yang Zhou|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-zhouyang.png) | +|FingerLiu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-FingerLiu.png) | +|flamywhale|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-flamywhale.png) | +|fossabot|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-fossabot.png) | +|Chunlan Fu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-fuchunlan.png) | +|haminhcong|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-haminhcong.png) | +|happywzy|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-happywzy.png) | +|iawia002|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-iawia002.png) | +|iparanoid|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-iparanoid.png) | +|Liang Zhang|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-JohnNiang.png) | +|jonahzheng|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-jonahzheng.png) | +|jrkeen|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-jrkeen.png) | +|karuppiah7890|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-karuppiah7890.png) | 
+|KONY128|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-KONY128.png) | +|l1ch40|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-l1ch40.png) | +|live77|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-live77.png) | +|Lruler|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-Lruler.png) | +|lshgdut|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-lshgdut.png) | +|Shaohui Liu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-liushaohui.png) | +|Xingmin Lu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-luxingmin.png) | +|Wei Xu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-xuwei.png) | +|mayocream|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-mayocream.png) | +|mazak-ui|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-mazak-ui.png) | +|molliezhang|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-molliezhang.png) | +|Mrxyy|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-Mrxyy.png) | +|muzi502|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-muzi502.png) | +|nanjofan|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-nanjofan.png) | +|nathan-415|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-nathan-415.png) | +|PulkitSinghDev|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-PulkitSinghDev.png) | +|RealHarshThakur|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-RealHarshThakur.png) | +|rockpanda|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-rockpanda.png) | 
+|rodmiromind|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-rodmiromind.png) | +|ruibaby|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-ruibaby.png) | +|sagilio|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-sagilio.png) | +|sbhnet|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-sbhnet.png) | +|seanly|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-seanly.png) | +|serenashe|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-serenashe.png) | +|shihaoH|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-shihaoH.png) | +|Sigboom|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-Sigboom.png) | +|styshoo|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-styshoo.png) | +|tagGeeY|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-tagGeeY.png) | +|TCeason|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-TCeason.png) | +|txfs19260817|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-txfs19260817.png) | +|VeraXIE1997|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-VeraXIE1997.png) | +|vincenthe-ks|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-vincenthe-ks.png) | +|vinsonzou|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-vinsonzou.png) | +|waynerv|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-waynerv.png) | +|Changjiang Li|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-lichangjiang.png) | +|Kanwen Deng|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-dengkanwen.png) | +|Zhisong 
Weng|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-wengzhisong.png) | +|x893675|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-x893675.png) | +|Yangmao Zhang|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-zhangyangmao.png) | +|yJunS|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-yJunS.png) | +|Yongjun Du|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-duyongjun.png) | +|yuezhuangshi|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-yuezhuangshi.png) | +|yuyicai|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-yuyicai.png) | +|zhanglin-doudou|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-zhanglin-doudou.png) | +|Han Zhu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/contributor-zhuhan.png) | + +> 因贡献者都是从 GitHub 仓库导出,因此大部分证书是以 GitHub ID 进行制作的,如果您希望以自己的真实姓名制作证书,请联系小 kk(微信搜索 kubesphere),进行二次制作。 + +还有很多默默在 GitHub 提交过 Pull Request 的来自全球各地的社区贡献者,我们尚且还不知道这些 Contributor 的名字或联系方式。因此 KubeSphere 社区开启了 [issue](https://github.com/kubesphere/community/issues/367) 和[表单](https://jinshuju.net/f/Npcqwi)来长期公开征集。 + +如果您为 KubeSphere 贡献过代码,但没有获得该证书也并未出现在上述公开名单中,欢迎回复 issue 或填写表单。一经确认,即可为您制作证书和寄送周边礼物。 + +当然,这是长期征集,在本文发布后,只要您贡献了代码(之前从未贡献过),即可申请 KubeSphere Contributor 认证。 + +## 公开致谢 + +KubeSphere 社区向此次获得 2021 年新晋的 KubeSphere Contributor 致谢,并向所有在 KubeSphere 中文论坛分享过技术文章的小伙伴致以最诚挚的问候! 
+ +## 展望未来 + +贡献代码是参与开源贡献基础的方式,也是不可或缺的方式。作为一个开源项目,KubeSphere 为目前拥有 200+ 贡献者而感到荣幸。 + +贡献者是一个开源项目发展的基础,KubeSphere 欢迎更多的朋友加入到 KubeSphere Contributor 的行列。 diff --git a/content/zh/blogs/kubesphere-core-architecture.md b/content/zh/blogs/kubesphere-core-architecture.md index 9973eeb00..11a9082ba 100644 --- a/content/zh/blogs/kubesphere-core-architecture.md +++ b/content/zh/blogs/kubesphere-core-architecture.md @@ -144,7 +144,7 @@ subjects: ### CRD + controller -自定义资源(Custom Resource) 是对 Kubernetes API 的扩展,可以通过动态注册的方式拓展 K8s API。用户可以使用 kubectl 来创建和访问其中的对象,就像操作内置资源一样。 +定制资源(Custom Resource) 是对 Kubernetes API 的扩展,可以通过动态注册的方式拓展 K8s API。用户可以使用 kubectl 来创建和访问其中的对象,就像操作内置资源一样。 通过 CRD 对资源进行抽象,再通过 controller 监听资源变化维护资源状态, controller 的核心是 Reconcile,与他的意思一样,通过被动、定时触发的方式对资源状态进行维护,直至达到声明的状态。 diff --git a/content/zh/blogs/kubesphere-juicefs.md b/content/zh/blogs/kubesphere-juicefs.md new file mode 100644 index 000000000..60e5e291c --- /dev/null +++ b/content/zh/blogs/kubesphere-juicefs.md @@ -0,0 +1,129 @@ +--- +title: '在 Kubernetes 中安装和使用 JuiceFS 存储' +tag: 'KubeSphere,JuiceFS' +keyword: 'Kubernetes, KubeSphere, JuiceFS, 对象存储 ' +description: '本教程将介绍如何在 KubeSphere 中一键部署 JuiceFS CSI Driver,为集群上的各种应用提供数据持久化。' +createTime: '2021-11-17' +author: '朱唯唯,尹珉' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-juicefs-cover.png' +--- + +## JuiceFS 简介 + +JuiceFS 是为海量数据设计的分布式文件系统,使用对象存储来做数据持久化,避免重复造轮子,还能大大降低工程复杂度,让用户专注解决元数据和访问协议部分的难题。 + +使用 JuiceFS 存储数据,数据本身会被持久化在对象存储(例如,Amazon S3),而数据所对应的元数据可以根据场景需要被持久化在 Redis、MySQL、SQLite 等多种数据库中。 + +## KubeSphere 平台介绍 + +KubeSphere 是在 Kubernetes 之上构建的以应用为中心的多租户容器平台,提供全栈的 IT 自动化运维的能力,简化企业的 DevOps 工作流。 + +KubeSphere 提供了运维友好的向导式操作界面,即便是 Kubernetes 经验并不丰富的用户,也能相对轻松的上手开始管理和使用。它提供了基于 Helm 的应用市场,可以在可视化界面下非常轻松地安装各种 Kubernetes 应用。 + +---- + +本教程将介绍如何在 KubeSphere 中一键部署 JuiceFS CSI Driver,为集群上的各种应用提供数据持久化。 + +## 前提条件 + +- [安装 
KubeSphere](https://v3-1.docs.kubesphere.io/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs/) +- [在 KubeSphere 中启用应用商店](https://kubesphere.com.cn/docs/pluggable-components/app-store/) +- 准备对象存储 + - [创建华为云 OBS](https://support.huaweicloud.com/function-obs/index.html) + - [创建秘钥](https://support.huaweicloud.com/usermanual-ca/zh-cn_topic_0046606340.html) + +## 部署 Redis + +Redis 是 JuiceFS 架构中的关键组件,它负责存储所有元数据并响应客户端对元数据的操作。所以在部署 JuiceFS CSI Driver 之前,需要先部署一个 Redis 数据库,部署详细步骤可参考 [KubeSphere 官方文档](https://kubesphere.com.cn/docs/application-store/built-in-apps/redis-app/)。 + +## 部署 JuiceFS CSI Driver + +KubeSphere 从 3.2.0 开始新增了 “**动态加载应用商店**” 的功能,合作伙伴可通过提交 PR 申请将应用的 Helm Chart 集成到 KubeSphere 应用商店,这样 KubeSphere 应用商店即可动态加载应用。目前 JuiceFS CSI Driver 的 Helm Chart 已经通过这种方式集成到了 KubeSphere 的应用商店,用户可以一键将 JuiceFS CSI Driver 部署至 Kubernetes。 + +首先选择您所需部署的企业空间和项目。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111171258640.png) + +进入项目后,点击“创建”部署新应用。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111171322458.png) + +选择“从应用商店”。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111171323750.png) + +点击目标应用,然后点击“部署”。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111171324754.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111171326929.png) + +修改 backend 参数。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/df9d86c2-590f-4699-a4da-698ffce2e0cf.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/cca8f710-1e56-40c2-b405-ca06189d73de.png) + +验证服务。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/08cfbc9b-808a-40ad-a3a2-6932e15e82b3.png) + +## 部署有状态应用 + +创建有状态副本集。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/499b9863-ea8e-4244-b092-21a728833fe1.png) + +添加自定义名称。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/9cf28790-d656-4ced-a6fd-e1a7f8d3ea8b.png) + +添加容器镜像。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/679cfa7a-054e-401e-8470-f85ed54e805b.png) + +``` +sh,-c,while true; do echo $(date -u) >> /data/out.txt; sleep 5; done +``` + +![](https://pek3b.qingstor.com/kubesphere-community/images/98129318-2906-461e-b86d-cb9fe486eea7.png) + +添加存储卷模板。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/ae2c0266-0b34-493d-bd31-80a7332e3238.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/98e4cbaf-e252-4605-b60a-34518128fbbc.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/e0fa1604-6e8d-460e-b357-30c308972bd1.png) + +检查状态。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/55144d0e-1be3-4cfd-a00a-aa2e00946e88.png) + +## 验证存储卷 + +首先验证创建的 PVC 绑定状态。 + +``` +kubectl get pvc -n kubesphere +``` + +![](https://pek3b.qingstor.com/kubesphere-community/images/2ce0de69-c590-4d4a-b4b9-cec9e692c816.png) + +进入有状态应用检查挂载状态。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/eef27646-6521-43fc-a456-7e9444827368.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/182aae69-5f2b-4aa1-aa5a-0d6972c2ce28.png) + +登录 OBS 查看文件同步状态。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/58287121-07f9-4a3e-ba3b-78be61a3eeee.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/e3d4bf02-db34-4c37-a537-cb1a692d071e.png) + +## 注意事项 + +1. JuiceFS CSI Driver 安装完成任何 namespace 都可以使用; +2. PVC 所属的 pod 归属在 JuiceFS CSI Driver 的 namespace 中; +3. 创建完应用必须进入所声明挂载的文件夹存放数据,远端存储才会同步显示。 diff --git a/content/zh/blogs/kubesphere-member-certificates.md b/content/zh/blogs/kubesphere-member-certificates.md new file mode 100644 index 000000000..ac1c78994 --- /dev/null +++ b/content/zh/blogs/kubesphere-member-certificates.md @@ -0,0 +1,67 @@ +--- +title: '恭贺 2021 年新晋 KubeSphere Member,快来领取证书!' 
+tag: 'KubeSphere, 社区' +keyword: '社区, 开源, 贡献, KubeSphere' +description: 'KubeSphere 社区向 2021 年新晋的 KubeSphere Member 致谢,感谢各位 Member 深度和积极的开源贡献。' +createTime: '2021-11-26' +author: 'KubeSphere' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-member-cover.png' +--- + +2021 年,KubeSphere 社区又收获了逾百位的贡献者,包括代码、中英文文档、技术布道、本地化与国际化等贡献都是我们认可的贡献方式,在此我们对 KubeSphere 社区所有的贡献者与合作伙伴表示衷心的感谢! + +社区设置了 KubeSphere Contributor 奖项作为激励多次为 KubeSphere 贡献代码的成员。上周已经[颁发了 2021 年度新晋的 KubeSphere Contributor 证书](https://kubesphere.com.cn/blogs/kubesphere-contributor-certificates/)。而对于具有突出贡献的活跃贡献者,我们还设立了专门的 KubeSphere Member 奖项作为激励。 + +从上一次[颁发](https://kubesphere.com.cn/blogs/kubesphere-certificates/)(2021 年 2 月 11 日)到现在,共有 10 位(其中 Daniel Hu 的证书已单独颁发过,此次不再颁布)突出贡献者入选了 KubeSphere Member,均由 KubeSphere 社区技术委员会与 KubeSphere 指导委员会投票通过。他们深度参与了 KubeSphere 社区开源贡献,在社区独立完成了一个或多个功能特性开发、文档撰写以及测试,并帮助 KubeSphere 在多个社区积极布道,推广开源技术,目前已全部邀请加入了 KubeSphere 的 Github 组织。 + +## 领取证书 + +| GitHub ID 或姓名 | 证书 | +| ---- | ---- | +|Shaohui Liu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-liushaohui.png) | +|Yuanpeng Liang|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-liangyuanpeng.png) | +|Dhruv Kela|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-kela.png) | +|Bojan|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-bojan.png) | +|Akhil Mohan|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-mohan.png) | +|Wenhu Wang|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-wangwenhu.png) | +|Wei Xu|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-xuweimango.png) | +|Abdelouahab|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-abdelouahab.png) | +|Liuyi Chen|[点击查看下载证书](https://pek3b.qingstor.com/kubesphere-community/images/member-chenliuyi.png) | + +证书示例: 
+![](https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-member-certification.png) + +如果您深度参与了 KubeSphere 社区开源贡献,还未入选 KubeSphere Member,欢迎您在 [GitHub](https://github.com/kubesphere/community/issues) 上发起提名申请。示例:[REQUEST: New membership for 123liubao #375](https://github.com/kubesphere/community/issues/375)。 + +经过 KubeSphere 社区技术委员会与 KubeSphere 指导委员会投票通过后,即可入选 KubeSphere Member,并获得此证书。 + +## 公开致谢 + +KubeSphere 社区向 2021 年新晋的 KubeSphere Member 致谢,感谢各位 Member 深度和积极的开源贡献。 + +另外,社区给每个新晋的 KubeSphere Member 都寄了一套 KubeSphere 社区周边礼品作纪念。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/KubeSphere-swag.png) + +## 如何成为 KubeSphere Member + +那么如何成为 KubeSphere Member,成为 KubeSphere Member 之后又能做什么呢? + +想要成为 KubeSphere Member,需要有突出的贡献: + +- 对项目的积极贡献。半年内至少为一个特定的 SIG 代码仓库贡献了一个重要的 PR +- 完成一项或多项功能的开发贡献 +- 由 SIG 的两名成员倡议并由 SIG 的负责人批准 +- 帮助 Review 其他贡献者的 PR + +作为 KubeSphere Member,你可能需要做这些工作: + +- Review 其他贡献者的 PR +- 参与管理 Github issue 和 PR +- 参与与组织社区 SIG 会议 + +## 展望未来 + +近几年,开源发展势头迅猛,越来越多的企业和项目都开始拥抱开源。而对众多的用户而言,参与开源能够塑造个人品牌,认识各地的开发者,提升个人技术、思考以及交流能力。参与贡献开源并不仅仅是“贡献”,而且还是“双赢”。 + +所以,KubeSphere 开源社区欢迎更多的伙伴参与进来,为开源贡献一份力量,合作共赢。也欢迎更多的小伙伴能够入选 KubeSphere Member。 diff --git a/content/zh/blogs/kubesphere-nocalhost.md b/content/zh/blogs/kubesphere-nocalhost.md new file mode 100644 index 000000000..8e58050ad --- /dev/null +++ b/content/zh/blogs/kubesphere-nocalhost.md @@ -0,0 +1,271 @@ +--- +title: '在 Kubernetes 中部署云原生开发工具 Nocalhost' +tag: 'Kubernetes,KubeSphere,Nocalhost' +keyword: 'Kubernetes, KubeSphere, Nocalhost ' +description: '本文将介绍如何在 Kubernetes 中快速部署 Nocalhost Server,用于提供一个帮助研发团队统一管理 Nocalhost 应用部署的管理平台。' +createTime: '2021-11-11' +author: '张海立,玉易才' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-nocalhost-cover.png' +--- + + +## KubeSphere 简介 +KubeSphere 是在 Kubernetes 之上构建的以应用为中心的多租户容器平台,提供全栈的 IT 自动化运维的能力,简化企业的 DevOps 工作流。 + +KubeSphere 提供了运维友好的向导式操作界面,即便是 Kubernetes 经验并不丰富的用户,也能相对轻松的上手开始管理和使用。它提供了基于 Helm 的应用市场,可以在图形化界面下非常轻松地安装各种 
Kubernetes 应用。 + + +## Nocalhost 简介 +[Nocalhost](https://nocalhost.dev/) 是一个允许开发者直接在 Kubernetes 集群内开发应用的工具。 + +Nocalhost 的核心功能是:提供 Nocalhost IDE 插件(包括 VSCode 和 Jetbrains 插件),将远端的工作负载更改为开发模式。在开发模式下,容器的镜像将被替换为包含开发工具(例如 JDK、Go、Python 环境等)的开发镜像。当开发者在本地编写代码时,任何修改都会实时被同步到远端开发容器中,应用程序会立即更新(取决于应用的热加载机制或重新运行应用),开发容器将继承原始工作负载所有的声明式配置(configmap、secret、volume、env 等)。 + +Nocalhost 还提供: + +- VSCode 和 Jetbrains IDE 一键 Debug 和 HotReload +- 在 IDE 内直接提供开发容器的终端,获得和本地开发一致的体验 +- 提供基于 Namespace 隔离的开发空间和 Mesh 开发空间 + +在使用 Nocalhost 开发 Kubernetes 的应用过程中,免去了镜像构建,更新镜像版本,等待集群调度 Pod 的过程,把编码/测试/调试反馈循环(code/test/debug cycle)从分钟级别降低到了秒级别,大幅提升开发效率 + +此外,Nocalhost 还提供了 Server 端帮助企业管理 Kubernetes 应用、开发者和开发空间,方便企业统一管理各类开发和测试环境。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-server.png) + +本文将介绍如何在 KubeSphere 中快速部署 [Nocalhost Server](https://nocalhost.dev/docs/server/server-overview),用于提供一个帮助研发团队统一管理 Nocalhost 应用部署的管理平台。 + +## 前提条件 +### 安装 KubeSphere +安装 KubeSphere 有两种方法。一是在 Linux 上直接安装,可以参考文档:[在 Linux 安装 KubeSphere](https://kubesphere.com.cn/docs/quick-start/all-in-one-on-linux/); 二是在已有 Kubernetes 中安装,可以参考文档:[在 Kubernetes 安装 KubeSphere](https://kubesphere.com.cn/docs/quick-start/minimal-kubesphere-on-k8s/)。 + +### 在 KubeSphere 中启用应用商店 +在 KubeSphere 中启用应用商店可以参考文档:[KubeSphere 应用商店](https://kubesphere.com.cn/docs/pluggable-components/app-store/)。 + + +## 安装 Nocalhost Server +### 在 KubeSphere 3.2 中从应用商店安装 + +Nocalhost Server 已经集成在了 KubeSphere 3.2 的应用商店中了,因此可以直接访问应用商店并按 [常规方式](https://kubesphere.com.cn/docs/project-user-guide/application/deploy-app-from-appstore/) 进行应用部署。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-helm-nocalhost.png) + + +### 在 KubeSphere 3.x 中通过应用仓库安装 +在 KubeSphere 3.x 中,您可以 [通过应用仓库来部署应用](https://kubesphere.com.cn/docs/project-user-guide/application/deploy-app-from-appstore/),下面分步介绍具体的操作过程。 + + +#### 步骤 1:添加应用商店 +首先,使用具备企业空间管理权限的账号登陆 KubeSphere 并进入您选定的一个企业空间,在您的企业空间中,进入「应用管理」下的「应用仓库」页面,并点击「添加仓库」。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/kubesphere-helm-nocalhost.png) + + +在弹出的对话框中,可将应用仓库名称设置为 `nocalhost`,将应用仓库的 URL 设置为 `https://nocalhost-helm.pkg.coding.net/nocalhost/nocalhost`,点击「验证」对 URL 进行验证,验证通过后再点击「确定」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-helm-coding.png) + +> ⚠️ 注意:URL 必须贴全链接,不能缺失 `https://` 这部分,否则会验证失败 + + +应用仓库导入成功后会显示在如下图所示的列表中。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubesphere-helm-done.png) + + + +> 有关添加私有仓库时的更多参数信息,请参见 [导入 Helm 仓库](https://kubesphere.com.cn/docs/workspace-administration/app-repository/import-helm-repository/)。 + + +#### 步骤 2:从应用模版部署应用 +进入您选定的用于部署 Nocalhost Server 的项目,如果还没有可用项目,可以直接打开企业空间页面中的「项目」栏目,「创建」一个新的项目。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-server-setup.png) + + +假设我们已经创建了一个名为 `nocalhost-server` 的项目,进入项目界面,进入「应用负载」下的「应用」页面,再点击「创建」新应用。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-server-setup-app.png) + + +在弹出的对话框中选择「从应用模板」创建。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-app-template.png) + + + +- **从应用商店**:选择内置的应用和以 Helm Chart 形式单独上传的应用。 +- **从应用模板**:从私有应用仓库和企业空间应用池选择应用。 + + +从下拉列表中选择之前添加的私有应用仓库 `nocalhost`,可以看到仓库中的 Nocalhost Server Helm Chart 如下显示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubesphere-helm-chart.png) + +您可以查看「应用信息」和「Chart 文件」,在版本下拉列表中选择版本,然后点击「部署」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-chart-deployment.png) + + +设置应用「名称」,确认应用「版本」和部署「位置」,点击「下一步」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-version.png) + + +在「应用设置」标签页,您可以手动编辑清单文件或直接点击「安装」。建议把 `service.type` 设置为 `ClusterIP`,以确保安装不受 Kubernetes 网络环境影响。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-service-type.png) + + +最后等待 Nocalhost Server 创建完成并开始运行,可以在「应用」中看到如下应用状态(可能需要刷新一下页面)。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-sever-app-status.png) + +#### 步骤 3:暴露 Nocalhost Server 服务 +进入「应用负载」下的「服务」页面,选择 `nocalhost-web` 服务,在最右侧的下拉菜单中选择「编辑外部访问」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-web.png) + + +在弹出的对话框中选择适合当前云端网络环境的外网「访问方式」,然后点击「确定」即可应用服务配置。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-cloud-network.png) + + +本文假设我们仍然保持 `ClusterIP` 的访问方式,通过 `kubectl port-forward` 来进行后续的 Nocalhost Server 使用。 + +```bash +❯ kubectl -n nocalhost-server port-forward service/nocalhost-web 8080:80 +Forwarding from 127.0.0.1:8080 -> 80 +Forwarding from [::1]:8080 -> 80 +``` +> ⚠️ 注意:这里的 `nocalhost-server` 请替换为您实际使用的部署了 Nocalhost 应用的 Namespace + +## 使用 Nocalhost Server +> 完成 Port Forward 后可使用 `http://localhost:8080` 来打开 Nocalhost Server Dashboard 页面;使用 `admin@admin.com` 及密码 `123456` 进行登录。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost8080.png) + +### 创建集群 +> Nocalhost Server 多用于管理整个团队的 Nocalhost 研发环境,因此我们首先需要添加可进行管理的集群。 + +在 Nocalhost Server Dashboard 中选择左侧菜单列表中的「集群」,进入页面后选择「添加集群」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-server-cluster.png) + +在弹出的对话框中输入「集群名称」,并录入 kubectl 可用的、**具备 **`**cluster-admin**`** 权限**的 kubeconfig 文件后「确认」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubeconfig.png) + + +> 目前可导入的 kubeconfig 文件内容还不支持 `exec` 类型的用户凭证,如果您使用的是这类凭证,建议您另外生成一个具有足够权限的 ServiceAccount 并使用其对应的 kubeconfig。 + + + +这里有多种方式获取目标集群的 kubeconfig,例如您可以返回 KubeSphere 并进入集群页面,获取当前集群的 kubeconfig 文件。注意,如果使用 kubeconfig 文件的应用部署在当前集群外,您需要将 `clusters:cluster:server` 参数的值修改为**对外暴露的 Kubernetes API 服务器地址**。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubeconfig-1.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubeconfig-2.png) + +添加成功后,可以得到如下的集群信息页面。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-dashboard-cluster.png) + + +### 创建开发空间(DevSpace) 
+接下来,我们进入「开发空间」页面,选择「创建开发空间」,并在弹出的对话框中选择「创建隔离开发空间」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-devspace-1.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-devspace-2.png) + + +> 关于「共享开发空间」,即 MeshSpace,可参考 [Manage MeshSpace](https://nocalhost.dev/docs/server/manage-devspace-mesh) 这篇官方介绍。 + + +在弹出的对话框中,可以填写「开发空间名称」(这里设置为 `demo`),选择「集群」和其「所有者」,并按需进行「其它设置」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-devspace-3.png) + + +创建完成后,可以在「开发空间」页面看到已创建的隔离开发空间,如下图所示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-devspace-4.png) + +### 创建 bookinfo 样例应用 +下一步我们开始为团队创建一些可部署的应用,先进入「应用」页面,选择「添加应用」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-bookinfo.png) + + +在弹出的对话框中填写「应用名称」,同时我们继续填写其它信息: + +- 选择 `Git` 作为「安装来源」 +- 输入 `https://github.com/nocalhost/bookinfo.git` 作为「Git 仓库地址」 +- 选择 `Manifest` 作为「Manifest 类型」 +- 「应用配置文件」留空,即使用 `config.yaml` +- 「Git 仓库的相对路径」填入 `manifest/templates` + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-bookinfo-2.png) + + + +> 可访问 GitHub 查看完整的 [bookinfo](https://github.com/nocalhost/bookinfo/) 样例应用仓库,了解详细的配置文件细节。 + + +### 创建用户并共享开发空间 +最后,我们创建一个样例用户来演示如何共享开发空间。进入到「用户」页面后,点击「添加用户」,在弹出的对话框中填入必需的用户信息后「完成」添加。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-test-user.png) + + +然后我们回到开发空间,选择我们之前创建的 `demo` 空间,点击画笔图标进入「编辑开发空间」的「共享用户」标签页,开始「添加共享」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-test-user-2.png) + +选择需要添加的用户,并注意选择默认的 `Cooperator` 协作者权限,另一个 `Viewer` 观察者权限的用户只能浏览开发空间。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-test-user-3.png) + + +至此,我们在 Nocalhost Server Dashboard 中的配置就告一段落,下面将进入 IDE 利用 Nocalhost 插件执行应用的部署。 + +### 执行 bookinfo 应用部署 +> 这里我们将使用 VS Code 执行应用的部署,首先需要 [在 VS Code 中安装 Nocalhost 插件](https://nocalhost.dev/docs/installation#install-vs-code-plugin)。 您也可以使用 [JetBrains 及其 Nocalhost 
插件](https://nocalhost.dev/docs/installation#install-jetbrains-plugin)。 + + +在 VS Code 中打开 Nocalhost 插件面板,点击 `+` 号创建集群连接,填入 Nocalhost Server 地址,并使用前面创建的普通用户 `test` 的用户名及密码进行登录。 +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-vscode-test.png) + + +创建成功可以看到之前在 Nocalhost Server Dashboard 中创建的开发空间 `demo(nh1btih)`。 +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-dashboard-demo.png) + + +点击 `demo` 空间右侧的火箭图标,会在 VS Code 编辑器顶部加载应用列表,如下图所示可以看到之前添加的 `bookinfo` 应用。 +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-vscode-bookinfo.png) + +选择该应用即会启动在 `demo` 空间中的 Nocalhost 应用部署过程(选择应用源的默认分支进行安装即可),安装完成后,会出现如下日志和弹窗提示: +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-access.png) + + +同时在 Nocalhost 插件面板中也可以展开 `Workload` 看到具体的部署内容。 +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-workload.png) + + +再往后就可以依照 Nocalhost 常规方式 [进入开发模式](https://nocalhost.dev/zh-CN/docs/quick-start#3-%E8%BF%9B%E5%85%A5%E5%BC%80%E5%8F%91%E6%A8%A1%E5%BC%8F),体验更便捷的云原生微服务开发过程! 
+ +## 开发体验 +- 点击绿色图标即可进入开发模式,选择从 git 仓库克隆源码。(第一次进入开发模式,Nocalhost 会提示你选择源码目录,可以直接选择本地的源码目录,也可以选择从 git 仓库克隆到本地,后续 Nocalhost 会记住源码目录,再次进入开发模式时,会直接打开这个源码目录) +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-git-1.png) +- 进入开发模式后 Nocalhost 会自动将工作负载的镜像替换为开发镜像,并且将源码同步到远程容器中。 +- 进入开发模式后会自动打开一个远程容器的终端,开发者可以在这个终端里面执行命令,运行开发程序。 +- 右键工作负载 authors,点击 Remote run,即可在远端容器里面运行在开发配置里面预设好的应用运行命令。 +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-kubesphere-remoterun.png) +- 更改代码并保存后,Nocalhost 自动将更改后的代码文件同步到远程容器中。(若在开发配置里面配置了该 hotReload: true 参数, nocalhost 将会自动在远程容器中再次运行预设好的应用运行命令,更改代码后无需要再次点击 Remote run 或者手动运行应用运行命令) + +### 远程调试 +- 右键工作负载 authors,点击Remote debug,即可进入远程 debug 模式。 +- 打上断点,在浏览器访问 http://127.0.0.1:39080/productpage,即可进行远程 debug + +![](https://pek3b.qingstor.com/kubesphere-community/images/nocalhost-git-3.png) diff --git a/content/zh/blogs/kubesphere-release-note-post.md b/content/zh/blogs/kubesphere-release-note-post.md index c400d392a..39de7cf83 100644 --- a/content/zh/blogs/kubesphere-release-note-post.md +++ b/content/zh/blogs/kubesphere-release-note-post.md @@ -1,6 +1,7 @@ --- title: 'KubeSphere 容器平台发布 2.1.1,全面支持 Kubernetes 1.17' -tag: 'Kubernetes,release,kubesphere' +tag: 'Kubernetes,release,KubeSphere' +keywords: 'KubeSphere, Kubernetes' createTime: '2020-02-24' author: 'Feynman' snapshot: 'https://pek3b.qingstor.com/kubesphere-docs/png/20200224093525.png' diff --git a/content/zh/blogs/kubesphere-route.md b/content/zh/blogs/kubesphere-route.md deleted file mode 100644 index f2dc17de6..000000000 --- a/content/zh/blogs/kubesphere-route.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: '深入浅出 Kubernetes 项目网关与应用路由' -tag: 'KubeSphere, Kubernetes' -keywords: 'KubeSphere, Kubernetes, Gateway, 网关, Spring Cloud' -description: '本篇内容简述了应用路由的基本架构,并与 Kubernetes Service 及其他应用网关分别做了对比。最后通过 SockShop 这个案例讲解的应用路由的配置方法。' -createTime: '2021-07-28' -author: '马岩' -snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202109071715557.png' 
---- - -KubeSphere 项目网关与应用路由提供了一种聚合服务的方式,将集群的内部服务通过一个外部可访问的 IP 地址以 HTTP 或 HTTPs 暴露给集群外部。应用路由定义了这些服务的访问规则,用户可以定义基于 host 主机名称和 URL 匹配的规则。同时还可以配置 HTTPs offloading 等选项。项目网关则是应用路由的具体实现,它承载了流量的入口并根据应用路由规则将匹配到的请求转发至集群内的服务。 - -## 整体架构 - -用户的服务和应用路由的架构密不可分,因此我们需要结合用户服务来理解项目网关的整体架构。一个典型生产环境中,项目网关架构如下图所示: - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370451-193428-kubernetes-ingress.png) - -图中组件共分为四个部分: - -1. `Nginx Ingress Controller` 是应用网关的核心组件。KubeSphere 项目网关基于 `Nginx Ingress Controller` 实现,它通过获 `Ingress` 对象生成 Nginx 反向代理规则配置并配置应用于 Nginx 服务。应用路由是一个 `Ingress` 对象。应用网关依赖于 `Service` 对外暴露 Nginx 服务,因此 `Service` 在生产环境中一般设置为 `LoadBalancer` 类型,由云服务商配置其公有云 IP 地址及外部负载均衡器,用以保障服务的高可用性。 -2. 外部负载均衡器,应用网关的 `Service` 生成的外部负载均衡器,一般由各个云服务商提供。因此每种负载均衡器的特性有很多差别,比如 SLA、带宽、IP 配置等等。我们一般可以通过服务商提供的注解对其进行配置,在设置网关时,我们通常需要了解这些特性。 -3. DNS 域名解析服务, 一般由域名服务商提供服务,我们可以配置域名解析纪录将域名指向 `LoadBalancer` 的公网 IP。如果子域名也指向同一 IP,我们可以可使用泛域名解析方式。 -4. 用户服务与应用路由,用户需要为应用程序创建 `Service` 用于暴露集群内的服务,然后创建应用路由对外暴露服务。注,`Nginx Ingress Controller` 并不通过 `Kube-proxy` 访问服务 IP。它通过服务查找与之关联 `POD` 的 `EndPoint`,并将其设置为 `Nginx` 的 `Upstream`。Nginx 直接连接 `POD` 可以避免由 `Service` 带来的额外网络开销。 - -### 应用路由 vs Service(type=LoadBalancer) - -在实践过程中,应用路由与 `Service` 的应用场景常常令人混淆。它们都可以向集群外暴露集群内服务,并提供负载均衡功能。并且应用路由看起来也是*依赖*于服务的,那么他们究竟有何区别呢?这个问题我们需要从以下几个角度理解。 - -1. `Service` 最初的设计动机是将某个服务的后端(Pod)进行抽象并公开为网络服务。它通常是以一个服务为单位的,所有后端均运行相同的服务端。而`应用路由`的设计目标是对 API 对象进行管理。它虽然也可以暴露一个服务,但是它更强大的功能在于其可以将一系列服务进行聚合,对外提供统一的访问 IP、域名、URL 等。 -2. `Service` 工作在 TCP/IP 协议的第四层,因此它使用 `IP+端口+协议` 三元组作为服务的唯一标识。因此当我们需要暴露一个服务时,它不能与其他已存在的服务冲突。例如,我们暴露基于 HTTP/HTTPs 的服务时,通常这类服务都会占用 80、443 端口,为了避免端口冲突,就需要为每个暴露的服务申请一个独立的 IP 地址,导致资源浪费。`应用路由`工作在七层,所有通过应用路由暴露的服务都可以共享项目网关的 IP 地址和 80、443 端口。每个`应用路由`使用 `Host+URL` 作为服务的唯一标识,将 HTTP 请求转发到后端服务中。 -3. 
`Service` 支持 TCP 与 UDP 协议并且对上层协议没有限制,而应用路由目前只支持 HTTP/HTTPs 或 HTTP2 协议,无法转发基于 TCP 或 UDP 的其他协议。 - -结合以上三点,我们不难得看出:应用路由更适用于使用 HTTP 协议的微服务架构的场景中,而 `Service` 虽然对 HTTP 协议没有深度的支持,但是它可以支持更多其他协议。 - -### 应用路由 vs Spring Cloud Gateway 或 Ocelot - -Java、.net Core 的开发人员对 `Spring Cloud Gateway` 或 `Ocelot` 一定不会感到陌生,他们是各自语言领域中最常用的 API 网关。那么到我们是否可以直接使用这些网关呢?理解这个问题,我们首先要知道什么是 API 网关,在 Wiki 百科中 `API Gateway` 并没有一个明确的定义,但我们从各个大厂的服务说明中可以得出一个基本的结论: - -> API 网关作为用户与后端服务之间的唯一入口管理后端服务,即 API 网关提供了一个方向代理服务将后端服务进行聚合,将客户端请求路由到后端服务并将结果返回给客户端。同时,API 网关可提供身份认证、监控、负载均衡、HTTPS offloading 等高级功能。 - -因此,应用路由承担了 API 网关的职责,即它与 `Spring Cloud Gateway` 或 `Ocelot` 等 API 网关具有同等地位。诸如 `Spring Cloud Gateway` 类的 API 网关通过 `Service` 的方式暴露到集群外部也可替代部分应用路由功能。我们接下做一个简要的对比,并分析一下他们的优缺点: - -1. 作为应用网关的基本职责,它们均具有路由转发功能。并且以上提到的网关均支持基于 HOST、URL 的路由转发规则设置。 -2. 服务注册与发现,`Spring Cloud Gateway` 等全家桶式解决方案提供了非常丰富的支持选项,对于 java 开发者更为友好,网关上的服务均可通过注册中心服务无缝衔接。而 Ocelot 虽然未内置服务发现与注册方案,但是可以通过 Ocelot + Consul 的方式实现。对比之下 Kubernetes 集群中部署应用,一般采用基于 DNS 的服务发现方式,但并没有为客户端提供一个统一的服务注册发现方式。对外暴露的服务需要显示的创建 Ingress 规则。相比之下 `Spring Cloud Gateway` 类的 API 网关使用相同技术栈,这可以极大的简化开发人员的学习成本。 -3. 通用性上,Ingress 是云原生背景下 Kubernetes 社区定义的 API 管理规范。KubeSphere 默认采用 `Nginx Ingress Controller`实现。同时我们可以使用任何兼容的第三方 Ingress 控制器进行替换。Ingress 中只定义了基本共性的功能,但网关通常会提供日志、监控、安全等更多通用的运维工具。相比之下,与语言紧密结合的 API 网关通常与开发平台进行绑定,语言相互替代性较差(不愿引入更多技术栈或无客户端集成支持)。功能相对固定,但大多提供了良好的插件机制,开发人员使用自己熟悉的语言进行拓展。 -4. 
性能方面,毋庸置疑,以基于 Nginx 的 Ingress Controller 为代表的通用型 API 网关,比 `Spring Cloud Gateway`、`Ocelot` 等有非常明显的性能优势。 - -总体来讲,每种网关都有其优缺点或局限性。在项目初期应首先考虑应用网关的架构。在基于云原生的场景下,应用路由会是一个不错的选择。而如果您的团队依赖于开发技术栈,那么常用技术栈中的 API 网关通常也会作为首选。但这并不意味着它们必须进行二选一,在一些复杂场景下我们可以结合二者的优势,开发人员使用自己熟知的 API 网关用于服务聚合、认证鉴权等功能,同时在其前方放置应用网关实现日志监控,负载均衡,HTTPs offloading 等工作。 - -微软官方微服务架构示例 [eShopOnContainers](https://docs.microsoft.com/en-us/dotnet/architecture/cloud-native/introduce-eshoponcontainers-reference-app "eShopOnContainers") 即采用了该种混合架构。 - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370654-571190-eshoponcontainers-architecture-aggregator-services.png) - -## 动手实战 - -理解以上应用场景和整体架构后,我们接下来演示如何在 KubeSphere 中配置项目网关和应用路由。以下内容将基于 Weaveworks 的微服务演示项目 SockShop 实现。SockShop 是一个典型的前后端分离架构,它由前端服务 `front-end` 和若干后端服务 `catalogue`、`carts`、`orders` 等组成。在当前架构下,`front-end` 除了承担静态页面服务的功能,还承担了后端 API 代理转发的任务。我们假设以下场景,即由 Nodejs 转发 API 造成服务异步阻塞,从而影响页面性能。因此我们决定使用 ingress 直接转发服务 `catalogue` 用以提升性能。下面我们看一下详细配置步骤。 - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627370560-468146-socksshop.png) - -### 准备工作 - -1. 在部署 SockShop 之前,我们首先要配置一个用于演示的企业空间 `workspace-demo` 和项目 `sock-shop`。具体步骤请参考[《创建企业空间、项目、帐户和角色》](https://kubesphere.com.cn/docs/quick-start/create-workspace-and-project/ "《创建企业空间、项目、帐户和角色》") - -2) 完成项目 `sock-shop` 的创建后,我们接下来使用 `kubectl` 部署 SockShop 的相关服务。您可以使用本地的控制台或 KubeSphere web 工具箱中的 `kubectl`执行以下命令。 - -``` -kubectl -n sock-shop apply -f https://github.com/microservices-demo/microservices-demo/raw/master/deploy/kubernetes/complete-demo.yaml -``` - -执行过后可以进入 `sock-shop` 的`工作负载`页面查看部署的状态,等待所有的部署都正常运行后,我们再进行下一步操作。 - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371198-6886-workload.png) - -### 项目网关配置 - -1. 进入 `sock-shop` 项目,从左侧导航栏进入项目设置下的高级设置页面,然后点击设置网关。 - -2. 在接下来弹出的对话框中,需要根据 KubeSphere 的安装环境进行设置。如果您使用的是本地开发环境或私有环境可以选择 `NodePort` 的方式暴露网关。如果是托管 Kubernetes 云服务,一般选择 LoadBalancer。 - -### 应用路由配置 - -1. 
首先,我们选择左侧导航栏**应用负载**中的**应用路由**,点击右侧的创建。在基本信息中填写名称 `frontend`。在路由规则中,添加一条新的规则。由于是演示项目,我们使用自动生成模式。KubeSphere 自动以<服务名称>.<项目名称>.<网关地址>.nip.io 格式生成域名,该域名由 nip.io 自动解析为网关地址。在路径、服务、端口上依次选择 "/"、"front-end"、"80"。点击**下一步**后,继续点击**创建**。 - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371226-863229-router.png) - -2. 路由创建完成后,可以在应用路由列表页面点击 `frontend` 进入详情。并在规则中可以点击**点击访问**访问按钮。在新的浏览器 tab 下,应该出现如下的网站: - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371245-961841-sockshop.png) - -3. 为了与下面的步骤进行对比,我们在 SockShop 的网站页面打开调试功能查看网络请求,以 Chrome 为例只需点击键盘的**F12**键。刷新一下页面后我们找到如下 `catalogue` API 请求: - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371262-490907-f12.png) - -该请求头中的 `X-Powered-By:Express` 表明了这条请求是由前端的 Nodejs 应用转发。 - -4. 接下来,在 `frontend` 的详情页面点击左侧的**更多操作**,并选择**编辑规则**。在弹出的编辑规则页面,选择刚刚增加的规则,并点击左侧的编辑图标。新增一条路径,在路径、服务、端口上依次选择"/catalogue"、"catalogue"、"80"。保存该设置。编辑后的规则如下: - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371282-336585-router2.png) - -5. 我们再次访问 SockShop 的网站页面,该页面并没有任何变化。我们使用浏览器调试器,再次查看网络请求,`catalogue` 的请求如下: - -![](https://kubesphere.com.cn/forum/assets/files/2021-07-27/1627371313-315498-f12-after.png) - -我们发现该请求已经没有了 `X-Powered-By:Express` 请求头,这说明了我们上面应用的规则已经生效,`catalogue`相关的 API 请求已经通过应用路由直接转发 `catalogue` 服务了,而不需要再通过 `fron-tend` 服务进行中转。以上的配置我们利用了路由规则的最长匹配规则。“/catalogue”比更路径具有更高的优先级。 - -更多配置内容可以参考[《应用路由》](https://kubesphere.com.cn/docs/project-user-guide/application-workloads/routes/ "《应用路由》") - -## 总结 - -本篇内容简述了应用路由的基本架构,并与 Kubernetes Service 及其他应用网关分别做了对比。最后通过 SockShop 这个案例讲解的应用路由的配置方法。希望读者对应用路由能有进一步的理解,根据应用的特性选择合适的外部服务暴露方式。 \ No newline at end of file diff --git a/content/zh/blogs/kubesphere-speaker-certificates.md b/content/zh/blogs/kubesphere-speaker-certificates.md new file mode 100644 index 000000000..7bb48f6f3 --- /dev/null +++ b/content/zh/blogs/kubesphere-speaker-certificates.md @@ -0,0 +1,87 @@ +--- +title: '年度 KubeSphere Talented Speaker 揭晓!快来领取证书!' 
+tag: 'KubeSphere, 社区' +keywords: '社区, 开源, 贡献, KubeSphere' +description: 'KubeSphere 社区向新诞生的 26 位 2021 年度的 KubeSphere Talented Speaker 致谢,欢迎更多的人加入到 KubeSphere Talented Speaker 的行列。' +createTime: '2021-10-15' +author: 'KubeSphere' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/talented-speaker-cover.png' +--- + +2021 年,KubeSphere 社区又收获了逾百位的贡献者,包括代码、中英文文档、技术布道、本地化与国际化等贡献都是我们认可的贡献方式,在此我们对 KubeSphere 社区所有的贡献者与合作伙伴表示衷心的感谢! +技术布道的贡献方式包括撰写技术博客、用户案例、在社区活动进行公开技术分享等方式,社区设置了 KubeSphere Talented Speaker 奖项作为激励参与社区活动公开技术分享的讲师。 + +从上一次[颁发](https://kubesphere.com.cn/blogs/kubesphere-certificates/)(2021 年 2 月 11 日)到现在,共诞生了 26 位 Talented Speaker,他们都参与过 2021 年 KubeSphere 社区组织的 KubeSphere Meetup 及云原生技术直播活动: + +[KubeSphere and Friends | Kubernetes and Cloud Native Meetup ——上海站](https://kubesphere.com.cn/live/meetup-shanghai/) + +[KubeSphere and Friends | Kubernetes and Cloud Native Meetup ——杭州站](https://kubesphere.com.cn/live/meetup-hangzhou/) + +[KubeSphere and Friends | Kubernetes and Cloud Native Meetup ——成都站](https://kubesphere.com.cn/live/meetup-chengdu/) + +[KubeSphere and Friends | Kubernetes and Cloud Native Meetup ——北京站](https://kubesphere.com.cn/live/meetup-beijing/) + +[使用 Flomesh 进行 Dubbo 微服务的服务治理](https://kubesphere.com.cn/live/pipy819-live/) + +[Apache APISIX Ingress Controller 实现与上手实践](https://kubesphere.com.cn/live/apisix826-live/) + +[Kubernetes 上的图数据库](https://kubesphere.com.cn/live/nebula0902-live/) + +[Kubernetes 控制器原理简介](https://kubesphere.com.cn/live/uisee0916-live/) + +[Kubebuilder 使用简介](https://kubesphere.com.cn/live/uisee0923-live/) + + +KubeSphere Talented Speaker 是授予在 KubeSphere 社区云原生技术直播活动、2021 年组织的 KubeSphere Meetup 及其他技术分享活动上进行过公开技术分享的优秀讲师,他们是帮助 KubeSphere 社区积极布道的技术传播者,具备出色的技术演讲能力。 + +通过他们的积极布道,越来越多的人去学习了解 KubeSphere,当然不只是 KubeSphere,还有其他多种云原生技术。得益于他们的分享,大批的社区用户,尤其是云原生领域的新人,学到了非常多的云原生知识和经验。 + + +## 领取证书 + + +| 姓名 | 证书 | +| ---- | ---- | +|Yu Li|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-liyu.png) | +|Ming 
Tang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-tangming.png) | +|Xingxiang Yang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-yangxingxiang.png) | +|Riyao Gao|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-gaoriyao.png) | +|Lijie Jiang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-jianglijie.png)| +|Fei Xu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-xufei.png) | +|Lei Qiao|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-qiaolei.png) | +|Ning Qi|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-qining.png) | +|Xiaofan Luan|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-luanxiaofan.png) | +|Zongwei Tan|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-tanzongwei.png) | +|Zhengjun Zhou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zhouzhengjun.png) | +|Ju Lou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-louju.png) | +|Chao Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zhangchao.png) | +|Weifeng Sheng|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-shengweifeng.png) | +|Haili Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zhanghaili.png) | +|Yuchuan He|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-heyuchuan.png) | +|Tao Hu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-hutao.png) | +|Zhenfei Pei|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-peizhenfei.png) | +|Sheng Zou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zousheng.png) | +|Pei Zhou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zhoupei.png) | +|Changqing 
Wu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-wuchangqing.png) | +|He Huang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-huanghe.png) | +|Yang Lin|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-linyang.png) | +|Siwei Gu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-gusiwei.png) | +|Xiaomeng Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-zhangxiaomeng.png) | +|Dehan Liu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/speaker-liudehan.png) | + + +如果在 2021 年,您也多次在其他技术交流会议上分享过 KubeSphere 相关的经验和技术,但没有获得该证书也并未出现在上述公开名单中,请微信联系助手小 KK `kubesphere`,备注「优秀讲师证书」,我们将为您定制 KubeSphere 证书与纪念周边。 + +## 公开致谢 + +KubeSphere 社区向此次获得 2021 年产生的 KubeSphere Talented Speaker 致谢,并向所有参与过 KubeSphere 社区技术交流会议及相关直播活动的小伙伴致以最诚挚的问候! + +![合影](https://pek3b.qingstor.com/kubesphere-community/images/2021meetup-puzzle.png) + +## 展望未来 + +社区的发展离不开分享和交流,技术亦如此。KubeSphere 社区很荣幸和如此多的优秀讲师相遇。 + +如果您对技术分享和传播感兴趣,如果您非常希望交流和分享 KubeSphere 相关的乃至云原生领域的技术和经验,那么非常欢迎您参与技术交流活动,成为一名 KubeSphere Talented Speaker。 + +**KubeSphere 社区欢迎更多的人加入到 KubeSphere Talented Speaker 的行列。** diff --git a/content/zh/blogs/kubesphere-values.md b/content/zh/blogs/kubesphere-values.md index 4f5bcf35d..566a4498b 100644 --- a/content/zh/blogs/kubesphere-values.md +++ b/content/zh/blogs/kubesphere-values.md @@ -1,7 +1,7 @@ --- title: '一文说清 KubeSphere 容器平台的价值' -tag: 'DevOps, 产品介绍, KubeSphere, 微服务' -tag: 'DevOps, Kubernetes, KubeSphere, Observability, microservice' +tag: 'DevOps, KubeSphere, 微服务' +keywords: 'DevOps, Kubernetes, KubeSphere, Observability, microservice' createTime: '2020-04-10' author: 'Feynman, Ray' snapshot: 'https://pek3b.qingstor.com/kubesphere-docs/png/20200410130334.png' diff --git a/content/zh/blogs/litmus-kubesphere.md b/content/zh/blogs/litmus-kubesphere.md index 37437d76b..fc81bd631 100644 --- a/content/zh/blogs/litmus-kubesphere.md +++ 
b/content/zh/blogs/litmus-kubesphere.md @@ -91,7 +91,7 @@ Litmus 的组件可以划分为两部分: 本教程将使用 KubeSphere 的应用模板来部署 Litmus。 -要想从应用模板部署应用,需要创建一个企业空间、一个项目和两个用户帐户(`ws-admin` 和 `project-regular`)。`ws-admin` 必须被授予企业空间中的 `workspace-admin` 角色, `project-regular` 必须被授予项目中的 `operator` 角色。在创建之前,我们先来回顾一下 KubeSphere 的多租户架构。 +要想从应用模板部署应用,需要创建一个企业空间、一个项目和两个用户(`ws-admin` 和 `project-regular`)。`ws-admin` 必须被授予企业空间中的 `workspace-admin` 角色, `project-regular` 必须被授予项目中的 `operator` 角色。在创建之前,我们先来回顾一下 KubeSphere 的多租户架构。 ### 多租户架构 @@ -103,7 +103,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 ### 创建帐户 -安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个帐户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个帐户 `user-manager`,然后使用 `user-manager` 创建新帐户。 +安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个用户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个用户 `user-manager`,然后使用 `user-manager` 创建新帐户。 1. 以 `admin` 身份使用默认帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。 @@ -113,7 +113,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 ![](https://pek3b.qingstor.com/kubesphere-community/images/20210602121105.png) - 在**帐户角色**中,有如下所示四个可用的内置角色。接下来要创建的第一个帐户将被分配 `users-manager` 角色。 + 在**帐户角色**中,有如下所示四个可用的内置角色。接下来要创建的第一个用户将被分配 `users-manager` 角色。 | 内置角色 | 描述 | | -------------------- | ------------------------------------------------------------ | diff --git a/content/zh/blogs/mysql-on-k8s.md b/content/zh/blogs/mysql-on-k8s.md new file mode 100644 index 000000000..fb124ed19 --- /dev/null +++ b/content/zh/blogs/mysql-on-k8s.md @@ -0,0 +1,189 @@ +--- +title: 'MySQL on K8s:开源开放的 MySQL 高可用容器编排方案' +tag: 'KubeSphere, Kubernetes, RadonDB' +keywords: 'KubeSphere, Kubernetes, MySQL, RadonDB' +description: '传统方式部署的 MySQL 在运维成本和资源弹性方面极具挑战,而 RadonDB 借助云原生有状态应用弥补了运维成本和弹性方面的不足,充分利用云的特点,实现基于云原生的高可用 MySQL 集群解决方案。' +createTime: '2021-06-17' +author: '高日耀' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/radondb-cover.png' +--- + +> 本文是上海站 Meetup 
讲师高日耀根据其分享内容整理的文章。[点击查看视频回放](https://kubesphere.com.cn/live/mysql-shanghai/) + +## MySQL 运维有哪些挑战? + +![](https://pek3b.qingstor.com/kubesphere-community/images/radondb-1.jpg) + +传统的物理部署方式,即把数据库部署在物理机上。对运维人员而言,会遇到图 1 中四个维度的挑战。 + +### 1、成本 + +自建 MySQL 数据库集群,需要的硬件设备包括:**服务器、网络交换机、内存、CPU、硬盘等** 硬件设备。 + +硬件设备成本包括:**选型、购买、维护、升级、损坏、数据丢失等**。 + +### 2、传统部署 + +每新增一套集群,都要进行操作系统安装、环境配置、MySQL 数据库安装、调试、性能优化、调参,后续还有系统升级,数据库升级… + +### 3、运维 + +针对不同的场景,比如一源一副本,一源两副本,多源多副本(MGR)等,需要编写对应的运维脚本。 + +在集群规模不大,MySQL 实例不多的情况下,运维人员尚且能应付,但是当集群不断增加,MySQL 实例达到成千上万个的时候,会极大的增加运维人员的负担,效率也会变得低下。 + +规模越大,出现误操作的概率会越高,严重的甚至要 **“从删库跑路”**。误删除关键数据,对于一般企业而言,应对能力较弱,危害很可能是致命的。 + +### 4、资源弹性 + +传统物理机部署不具备秒级弹性的能力,来针对 MySQL 在高峰或低谷时资源的自动弹性伸缩。例如,在业务高峰扩展 CPU、内存等资源,在低峰期收回闲置资源。 + +如果设计一次电商秒杀的场景持续时间是 30 分钟,那么我们可以在 30 分钟内,将网络、CPU、内存、磁盘等资源提升到最大。在秒杀结束之后,释放这些资源,极大节省成本。 + +## 主流解决方案 + +针对如上挑战,主流的解决方案有两种: + +1. 物理机 + 管理平台 + + 通过数据库管理平台,对数据库的统一管理,减轻数据库运维成本,提高数据库整体的可用性。 + +2. 上云 + + 将数据库部署到一个虚拟计算环境中,也就是常说的数据库上云。市面上各大云厂商都提供了 RDS 服务。 + + 数据库上云可以实现按需付费、按需扩展、高可用性以及存储整合等优势。大大降低运维人员大规模部署和运维数据库的难度。 + +那么,还有没有其它方案来解决这些挑战呢?接下来为大家介绍数据库容器化。 + +## 为什么要做数据库容器化? 
+ +据 CNCF 云原生产业联盟发布的[《中国云原生用户调查报告2020年》](https://www.cncf.io/blog/2021/04/28/cncf-cloud-native-survey-china-2020/)显示,60% 以上的中国企业已在生产环境中应用容器技术,其中 43% 的企业已将容器技术用于核心生产业务。 + +### 容器技术 + +![](https://pek3b.qingstor.com/kubesphere-community/images/radondb-2.jpg) + +### Docker 横空出世 + +- 相当轻量镜像标准化制作 + + 使安装部署和交付非常高效。解决了 PaaS 服务打包复杂,环境不一致等问题。 + +- 轻量级虚拟化(rootfs/cgroup/namespace) + + 有助于资源共享,降级性能损耗 + +### Kubernetes 容器编排的事实标准 + +依托着 Google Borg 项目的理论优势,继承了 Google 的大规模生产环境的经验。K8s 提供了一套基于容器构建分布式系统的基础依赖。K8s 也成为容器编排的事实标准。 + +- 运维能力 + + 路由网关、水平扩展、监控、备份、灾难恢复等。 + +- 声明式 API + + 描述容器化业务和容器间关系。 + +- 容器编排 + + 按照用户的意愿和整个系统的规则,完全自动化地处理好容器之间的各种关系。 + +通过观察用户实际使用 MySQL 时对容器化产品的迫切需求,可以看出 MySQL 容器化进程势在必行。以 [KubeSphere 开源社区](https://kubesphere.com.cn)为例,在其应用商店呼声最高的就是高可用版的 MySQL。 + +## MySQL 容器化探索 + +**RadonDB MySQL** 是一款基于 MySQL 的开源、高可用、云原生集群解决方案。支持一主多从高可用架构,并具备安全、自动备份、监控告警、自动扩容等全套管理功能。目前已经在生产环境中大规模的使用,包含 **银行,保险,传统大企业** 等。 + +[RadonDB MySQL Kubernetes](https://github.com/radondb/radondb-mysql-kubernetes) 支持在 Kubernetes 和 KubeSphere 上安装部署和管理,自动执行与运行 RadonDB MySQL 集群有关的任务。服务高可用由已经开源的 MySQL 集群高可用工具 **Xenon** 来实现。 + +### 架构图 + +每一个 Pod 里面的 Xenon 管理当前 Pod 中的 MySQL,主要获取并保存当前状态,获取当前执行的复制状态信息。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/radondb-3.jpg) + +### Helm 版 + +通用的包管理工具,将 K8s 资源模版化,方便共享。主要解决如下问题: + +* 部署应用资源,配置分离; +* 管理其生命周期; +* MySQL 的升级更新; +* 资源的删除。 + +主要功能: + +* MySQL 高可用 + * 无中心化领导者自动选举 + * 主从秒级切换 + * 数据强一致性 +* 集群管理 +* 监控告警 +* 集群日志管理 +* 账户管理 + +### KubeSphere 应用管理 + +![](https://pek3b.qingstor.com/kubesphere-community/images/radondb-4.jpg) + +可以通过终端执行命令部署回显信息。 + +```plain +$ xenoncli cluster status ++------------------------------------------------------+-------------------------------+--------+---------+--------------------------+---------------------+----------------+------------------------------------------------------+ +| ID | Raft | Mysqld | Monitor | Backup | Mysql | IO/SQL_RUNNING | MyLeader | 
++------------------------------------------------------+-------------------------------+--------+---------+--------------------------+---------------------+----------------+------------------------------------------------------+ +| demo-radondb-mysql-0.demo-radondb-mysql.default:8801 | [ViewID:1 EpochID:2]@LEADER | UNKNOW | OFF | state:[NONE] + | [ALIVE] [READWRITE] | [true/true] | demo-radondb-mysql-0.demo-radondb-mysql.default:8801 | +| | | | | LastError: | | | | ++------------------------------------------------------+-------------------------------+--------+---------+--------------------------+---------------------+----------------+------------------------------------------------------+ +| demo-radondb-mysql-1.demo-radondb-mysql.default:8801 | [ViewID:1 EpochID:2]@FOLLOWER | UNKNOW | OFF | state:[NONE] + | [ALIVE] [READONLY] | [true/true] | demo-radondb-mysql-0.demo-radondb-mysql.default:8801 | +| | | | | LastError: | | | | ++------------------------------------------------------+-------------------------------+--------+---------+--------------------------+---------------------+----------------+------------------------------------------------------+ +| demo-radondb-mysql-2.demo-radondb-mysql.default:8801 | [ViewID:1 EpochID:2]@FOLLOWER | UNKNOW | OFF | state:[NONE] + | [ALIVE] [READONLY] | [true/true] | demo-radondb-mysql-0.demo-radondb-mysql.default:8801 | +| | | | | LastError: | | | | ++------------------------------------------------------+-------------------------------+--------+---------+--------------------------+---------------------+----------------+------------------------------------------------------+ +$ xenoncli cluster gtid ++------------------------------------------------------+----------+-------+-------------------+--------------------+ +| ID | Raft | Mysql | Executed_GTID_Set | Retrieved_GTID_Set | ++------------------------------------------------------+----------+-------+-------------------+--------------------+ +| 
demo-radondb-mysql-1.demo-radondb-mysql.default:8801 | FOLLOWER | ALIVE | | | ++------------------------------------------------------+----------+-------+-------------------+--------------------+ +| demo-radondb-mysql-2.demo-radondb-mysql.default:8801 | FOLLOWER | ALIVE | | | ++------------------------------------------------------+----------+-------+-------------------+--------------------+ +| demo-radondb-mysql-0.demo-radondb-mysql.default:8801 | LEADER | ALIVE | | | ++------------------------------------------------------+----------+-------+-------------------+--------------------+ +``` +## RoadMap + +### Operator 版 + +Operator 版针对特定场景做有状态服务,针对复杂应用的自动化管理。除了满足 Helm 版的需求之外,主要解决如下问题: + +* 监听 Kubernetes API,在实例创建、伸缩、死亡等各个生命周期中做相应的处理来保证有状态应用中的数据连续性; +* 指定节点跨机房/异地灾备,IP 固定等; +* 主从复制出现异常或者复制延时超过正常范围自动定位和自动修复等。 + +借助 Operator 框架,以及对精细粒度操控应用等功能需求。 + +【即将推出】主要功能: + +* 增删节点 +* 自动扩缩容 +* 升级集群 +* 备份与恢复 +* 故障自动转移 +* 自动重建节点 +* 自动重启服务 +* 账户管理(提供 API 接口) +* 在线迁移 +* 自动化运维 +* 多节点角色 +* 灾备集群 +* SSL 传输加密 +* …… + diff --git a/content/zh/blogs/openelb-joins-cncf-sandbox-project.md b/content/zh/blogs/openelb-joins-cncf-sandbox-project.md new file mode 100644 index 000000000..d39314c8b --- /dev/null +++ b/content/zh/blogs/openelb-joins-cncf-sandbox-project.md @@ -0,0 +1,95 @@ +--- +title: 'OpenELB 项目进入 CNCF Sandbox,让私有化环境对外暴露服务更简单' +tag: 'CNCF' +keyword: 'OpenELB, 负载均衡器, Kubernetes, LoadBalancer, 物理机' +description: 'CNCF 宣布由青云科技 KubeSphere 团队开源的负载均衡器插件 OpenELB 正式进入 CNCF 沙箱' +createTime: '2021-11-24' +author: 'KubeSphere' +snapshot: 'https://kubesphere-community.pek3b.qingstor.com/images/4761636694917_.pic_hd.jpg' +--- + +![头图](https://kubesphere-community.pek3b.qingstor.com/images/4761636694917_.pic_hd.jpg) + +11 月 10 日,云原生计算基金会 (CNCF) 宣布由青云科技 KubeSphere 团队开源的负载均衡器插件 OpenELB 正式进入 CNCF 沙箱(Sandbox)托管。 + +![示意图](https://kubesphere-community.pek3b.qingstor.com/images/8471636692467_.pic_hd.jpg) + +OpenELB 项目在此前命名为 PorterLB,是为物理机(Bare-metal)、边缘(Edge)和私有化环境设计的负载均衡器插件,可作为 Kubernetes、K3s、KubeSphere 
的 LB 插件对集群外暴露 “LoadBalancer” 类型的服务,核心功能包括: + +- 基于 BGP 与 Layer 2 模式的负载均衡 +- 基于路由器 ECMP 的负载均衡 +- IP 地址池管理 +- 使用 CRD 进行 BGP 配置 + +![架构](https://kubesphere-community.pek3b.qingstor.com/images/8441636691354_.pic_hd.jpg) + +## 为什么发起 OpenELB + +我们起初在 KubeSphere 社区做了一项针对广大社区用户安装部署 Kubernetes 所使用环境的调研,从 5000 多份 KubeSphere 用户调研数据中发现有近 36% 的用户选择在物理机安装部署 Kubernetes,占比高居第一位。并且还有大量客户是在离线的数据中心或边缘设备安装和使用 Kubernetes 或 K3s,导致用户在私有环境对外暴露 LoadBalancer 服务比较困难。 + +![用户调研](https://kubesphere-community.pek3b.qingstor.com/images/8401636689164_.pic.jpg) + +我们知道,在 Kubernetes 集群中可以使用 “LoadBalancer” 类型的服务将后端工作负载暴露在外部。云厂商通常为 Kubernetes 提供云上的 LB 插件,但这需要将集群部署在特定 IaaS 平台上。然而,许多企业用户通常都将 Kubernetes 集群部署在裸机上,尤其是用于生产环境时。而且对于私有化环境特别是物理机或边缘集群,Kubernetes 并不提供 LoadBalancer 方案。 + +OpenELB 解决了在非公有云环境的 Kubernetes 集群下对外暴露 LoadBalancer 服务的问题,为私有化环境的用户提供了易用的 EIP 与 IP Pool 管理能力。 + +## 社区情况 + +目前 OpenELB 已具备生产可用的特性,已被**本来生活、苏州电视台、视源股份、云智天下、Jollychic、QingCloud、百旺、Rocketbyte** 等海内外多家企业采用。早在 2019 年底,本来生活就将 OpenELB 的早期版本用于其生产环境,可参考 [OpenELB 如何帮助本来生活在 K8s 物理环境暴露集群服务](https://mp.weixin.qq.com/s/uFwYaPE7cVolLWxYHcgZdQ) 了解详情。OpenELB 项目目前有 13 位贡献者,100 多位社区成员。 + +![采用企业](https://kubesphere-community.pek3b.qingstor.com/images/8411636689286_.pic_hd.jpg) + +## 与 MetalLB 的对比 + +MetalLB 在近期也加入了 CNCF Sandbox,该项目在 2017 年底发起,经过 4 年的发展已经在社区被广泛采用。OpenELB 作为后起之秀,采用了更加 Kubernetes-native 的实现方式,虽然起步更晚但得益于社区的帮助,已经迭代了 8 个版本并支持了多种路由方式。很多用户关注 MetalLB 与 OpenELB 的差异性,以下简单介绍两者对比。 + +### 云原生架构 + +在 OpenELB 中,不管是地址管理,还是 BGP 配置管理,你都可以使用 CRD 来配置。对于习惯了 Kubectl 的用户而言, OpenELB 十分友好。对于想定制 OpenELB 的高级用户,也可以直接调用 Kubernetes API 来做二次开发。在 MetalLB 中,需通过 ConfigMap 来配置, 感知它们的状态需要通过查看监控或者日志。 + +### 灵活的地址管理 + +OpenELB 通过 EIP CRD 管理地址,它定义子资源 Status 来存储地址分配状态,这样就不会存在分配地址时各副本发生冲突,编程时逻辑也会简单。 + +### 使用 gobgp 发布路由 + +不同于 MetalLB 自己实现 BGP 协议, OpenELB 采用标准的 gobgp 来发布路由,这样做的好处如下: + +- 开发成本低,且有 gobgp 社区支持 +- 可以利用 gobgp 丰富特性 +- 通过 BgpConf/BgpPeer CRD 动态配置 gobgp,用户无需重启 OpenELB 即可动态加载最新的配置信息 +- gobgp 作为 lib 使用时, 社区提供了基于 protobuf 的 API, OpenELB 在实现 BgpConf/BgpPeer CRD 
时也是参照该 API,并保持兼容 +- OpenELB 也提供 status 用于查看 BGP neighbor 配置,状态信息丰富 + +### 架构简单,资源占用少 + +OpenELB 目前只用部署 Deployment 即可,通过多副本实现高可用,部分副本 Crash 之后并不会影响已建立的正常连接。 + +BGP 模式下, Deployment 不同副本都会与路由器建立连接用于发布等价路由,所以正常情况下我们部署两个副本即可。在 Layer 2 模式下,不同副本之间通过 Kubernetes 提供的 Leader Election 机制选举 Leader,进而应答 ARP/NDP。 + +## OpenELB 安装与使用 + +目前 OpenELB 可支持部署在任意标准的 Kubernetes、K3s 以及其发行版,通过 Yaml 文件或 Helm Chart 一条命令完成部署。同时,在 KubeSphere 容器平台的应用商店和应用仓库也可以通过界面一键部署,可参考 [OpenELB 文档](https://openelb.github.io/docs/getting-started/installation/) 进行部署。 + +## 未来规划 + +得益于 CNCF 为项目提供了开源和中立的背书,OpenELB 也将真正变成一个由 100% 社区驱动的开源项目。接下来,OpenELB 将开发与实现如下功能,欢迎给社区提交需求与反馈: + +- 基于 Keepalived 实现 VIP 模式的高可用 +- 实现 kube-apiserver 的 LoadBalancer +- 支持 BGP 策略与配置 +- 支持 VIP Group +- 支持 IPv6 +- 提供独立的界面管理与配置 EIP 与 IP Pool +- 集成至 KubeSphere Console;提供 Prometheus metrics + +OpenELB 还将重点开展社区运营并推出一系列社区活动,希望借助更多开发者和用户的力量,解决用户在私有环境下的服务暴露与 IP 管理问题,为应用在 Kubernetes 上打开一扇大门,使服务对外暴露与管理变得更加轻松。 + +## 持续开源开放 + +KubeSphere 团队秉持 “Upstream first” 的原则,今年 7 月份先将 Fluentbit Operator 项目捐给 Fluent 社区成为 CNCF 子项目,此次又将 OpenELB 加入 CNCF Sandbox。未来 KubeSphere 团队将继续保持开源、开放的理念,持续作为 OpenELB 项目的参与方之一,推动国内和国际开源组织的生态建设,帮助 OpenELB 社区培育一个中立的开源社区与生态,与更多的容器平台及上下游生态伙伴进行深度合作,欢迎大家关注、使用 OpenELB 以及参与社区贡献。 + +- ✨ GitHub:[https://github.com/kubesphere/openelb/](https://github.com/kubesphere/openelb/) +- 💻 官网:[https://openelb.github.io/](https://openelb.github.io/) +- 🙋 社群:请通过官网底部二维码关注 KubeSphere 公众号加入社群 + diff --git a/content/zh/blogs/rook-on-kubesphere.md b/content/zh/blogs/rook-on-kubesphere.md new file mode 100644 index 000000000..aa7bf86f0 --- /dev/null +++ b/content/zh/blogs/rook-on-kubesphere.md @@ -0,0 +1,388 @@ +--- +title: '在 Kubernetes 中使用 Rook 构建云原生存储环境' +tag: '存储' +keywords: 'Rook, KubeSphere, Ceph, Kubernetes' +description: '本文将介绍如何使用 Rook 来创建维护 Ceph 集群,并作为 Kubernetes 的持久化存储。' +createTime: '2021-12-29' +author: '尹珉' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/rook-on-kubesphere.png' +--- + +## Rook 介绍 + +Rook 
是一个开源的云原生存储编排器,为各种存储解决方案提供平台、框架和支持,以便与云原生环境进行原生集成。 + +Rook 将分布式存储系统转变为自管理、自扩展、自修复的存储服务。它使存储管理员的部署、引导、配置、配置、扩展、升级、迁移、灾难恢复、监控和资源管理等任务自动化。 + +简而言之,Rook 就是一组 Kubernetes 的 Operator,它可以完全控制多种数据存储解决方案(例如 Ceph、EdgeFS、Minio、Cassandra)的部署,管理以及自动恢复。 + +到目前为止,Rook 支持的最稳定的存储仍然是 Ceph,本文将介绍如何使用 Rook 来创建维护 Ceph 集群,并作为 Kubernetes 的持久化存储。 + +## 环境准备 + +K8s 环境可以通过安装 KubeSphere 进行部署,我使用的是高可用方案。 + +在公有云上安装 KubeSphere 参考文档:[多节点安装](https://v3-1.docs.kubesphere.io/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs/ "多节点安装") + +⚠️ 注意:kube-node(5,6,7)的节点上分别有两块数据盘。 + +```bash +kube-master1 Ready master 118d v1.17.9 +kube-master2 Ready master 118d v1.17.9 +kube-master3 Ready master 118d v1.17.9 +kube-node1 Ready worker 118d v1.17.9 +kube-node2 Ready worker 118d v1.17.9 +kube-node3 Ready worker 111d v1.17.9 +kube-node4 Ready worker 111d v1.17.9 +kube-node5 Ready worker 11d v1.17.9 +kube-node6 Ready worker 11d v1.17.9 +kube-node7 Ready worker 11d v1.17.9 +``` + +安装前请确保 node 节点都安装上了 lvm2,否则会报错。 + +## 部署安装 Rook、Ceph 集群 + +**1.克隆 Rook 仓库到本地** + +```bash +$ git clone -b release-1.4 https://github.com/rook/rook.git +``` + +**2.切换目录** + +```bash +$ cd /root/ceph/rook/cluster/examples/kubernetes/ceph +``` + +**3.部署 Rook,创建 CRD 资源** + +```bash +$ kubectl create -f common.yaml -f operator.yaml +# 说明: +# 1.comm.yaml里面主要是权限控制以及CRD资源定义 +# 2.operator.yaml是rook-ceph-operator的deloyment +``` + +**4.创建 Ceph 集群** + +``` +$ kubectl create -f cluster.yaml +# 重要说明: +# 演示不做定制化操作,Ceph集群默认会动态去识别node节点上未格式化的全新空闲硬盘,自动会对这些盘进行OSD初始化(至少是需要3个节点,每个节点至少一块空闲硬盘) +``` + +**5.检查 pod 状态** + +```bash +$ kubectl get pod -n rook-ceph -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +csi-cephfsplugin-5fw92 3/3 Running 6 12d 192.168.0.31 kube-node7 +csi-cephfsplugin-78plf 3/3 Running 0 12d 192.168.0.134 kube-node1 +csi-cephfsplugin-bkdl8 3/3 Running 3 12d 192.168.0.195 kube-node5 +csi-cephfsplugin-provisioner-77f457bcb9-6w4cv 6/6 Running 0 12d 10.233.77.95 kube-node4 
+csi-cephfsplugin-provisioner-77f457bcb9-q7vxh 6/6 Running 0 12d 10.233.76.156 kube-node3 +csi-cephfsplugin-rqb4d 3/3 Running 0 12d 192.168.0.183 kube-node4 +csi-cephfsplugin-vmrfj 3/3 Running 0 12d 192.168.0.91 kube-node3 +csi-cephfsplugin-wglsw 3/3 Running 3 12d 192.168.0.116 kube-node6 +csi-rbdplugin-4m8hv 3/3 Running 0 12d 192.168.0.91 kube-node3 +csi-rbdplugin-7wt45 3/3 Running 3 12d 192.168.0.195 kube-node5 +csi-rbdplugin-bn5pn 3/3 Running 3 12d 192.168.0.116 kube-node6 +csi-rbdplugin-hwl4b 3/3 Running 6 12d 192.168.0.31 kube-node7 +csi-rbdplugin-provisioner-7897f5855-7m95p 6/6 Running 0 12d 10.233.77.94 kube-node4 +csi-rbdplugin-provisioner-7897f5855-btwt5 6/6 Running 0 12d 10.233.76.155 kube-node3 +csi-rbdplugin-qvksp 3/3 Running 0 12d 192.168.0.183 kube-node4 +csi-rbdplugin-rr296 3/3 Running 0 12d 192.168.0.134 kube-node1 +rook-ceph-crashcollector-kube-node1-64cf6f49fb-bx8lz 1/1 Running 0 12d 10.233.101.46 kube-node1 +rook-ceph-crashcollector-kube-node3-575b75dc64-gxwtp 1/1 Running 0 12d 10.233.76.149 kube-node3 +rook-ceph-crashcollector-kube-node4-78549d6d7f-9zz5q 1/1 Running 0 8d 10.233.77.226 kube-node4 +rook-ceph-crashcollector-kube-node5-5db8557476-b8zp6 1/1 Running 1 11d 10.233.81.239 kube-node5 +rook-ceph-crashcollector-kube-node6-78b7946769-8qh45 1/1 Running 0 8d 10.233.66.252 kube-node6 +rook-ceph-crashcollector-kube-node7-78c97898fd-k85l4 1/1 Running 1 8d 10.233.111.33 kube-node7 +rook-ceph-mds-myfs-a-86bdb684b6-4pbj7 1/1 Running 0 8d 10.233.77.225 kube-node4 +rook-ceph-mds-myfs-b-6697d66b7d-jgnkw 1/1 Running 0 8d 10.233.66.250 kube-node6 +rook-ceph-mgr-a-658db99d5b-jbrzh 1/1 Running 0 12d 10.233.76.162 kube-node3 +rook-ceph-mon-a-5cbf5947d8-vvfgf 1/1 Running 1 12d 10.233.101.44 kube-node1 +rook-ceph-mon-b-6495c96d9d-b82st 1/1 Running 0 12d 10.233.76.144 kube-node3 +rook-ceph-mon-d-dc4c6f4f9-rdfpg 1/1 Running 1 12d 10.233.66.219 kube-node6 +rook-ceph-operator-56fc54bb77-9rswg 1/1 Running 0 12d 10.233.76.138 kube-node3 
+rook-ceph-osd-0-777979f6b4-jxtg9 1/1 Running 1 11d 10.233.81.237 kube-node5 +rook-ceph-osd-10-589487764d-8bmpd 1/1 Running 0 8d 10.233.111.59 kube-node7 +rook-ceph-osd-11-5b7dd4c7bc-m4nqz 1/1 Running 0 8d 10.233.111.60 kube-node7 +rook-ceph-osd-2-54cbf4d9d8-qn4z7 1/1 Running 1 10d 10.233.66.222 kube-node6 +rook-ceph-osd-6-c94cd566-ndgzd 1/1 Running 1 10d 10.233.81.238 kube-node5 +rook-ceph-osd-7-d8cdc94fd-v2lm8 1/1 Running 0 9d 10.233.66.223 kube-node6 +rook-ceph-osd-prepare-kube-node1-4bdch 0/1 Completed 0 66m 10.233.101.91 kube-node1 +rook-ceph-osd-prepare-kube-node3-bg4wk 0/1 Completed 0 66m 10.233.76.252 kube-node3 +rook-ceph-osd-prepare-kube-node4-r9dk4 0/1 Completed 0 66m 10.233.77.107 kube-node4 +rook-ceph-osd-prepare-kube-node5-rbvcn 0/1 Completed 0 66m 10.233.81.73 kube-node5 +rook-ceph-osd-prepare-kube-node5-rcngg 0/1 Completed 5 10d 10.233.81.98 kube-node5 +rook-ceph-osd-prepare-kube-node6-jc8cm 0/1 Completed 0 66m 10.233.66.109 kube-node6 +rook-ceph-osd-prepare-kube-node6-qsxrp 0/1 Completed 0 11d 10.233.66.109 kube-node6 +rook-ceph-osd-prepare-kube-node7-5c52p 0/1 Completed 5 8d 10.233.111.58 kube-node7 +rook-ceph-osd-prepare-kube-node7-h5d6c 0/1 Completed 0 66m 10.233.111.110 kube-node7 +rook-ceph-osd-prepare-kube-node7-tzvp5 0/1 Completed 0 11d 10.233.111.102 kube-node7 +rook-ceph-osd-prepare-kube-node7-wd6dt 0/1 Completed 7 8d 10.233.111.56 kube-node7 +rook-ceph-tools-64fc489556-5clvj 1/1 Running 0 12d 10.233.77.118 kube-node4 +rook-discover-6kbvg 1/1 Running 0 12d 10.233.101.42 kube-node1 +rook-discover-7dr44 1/1 Running 2 12d 10.233.66.220 kube-node6 +rook-discover-dqr82 1/1 Running 0 12d 10.233.77.74 kube-node4 +rook-discover-gqppp 1/1 Running 0 12d 10.233.76.139 kube-node3 +rook-discover-hdkxf 1/1 Running 1 12d 10.233.81.236 kube-node5 +rook-discover-pzhsw 1/1 Running 3 12d 10.233.111.36 kube-node7 + +``` + +以上是所有组件的 pod 完成后的状态,其中 rook-ceph-osd-prepare 开头的 pod 是自动感知集群新挂载硬盘的,只要有新硬盘挂载到集群自动会触发 OSD。 + +**6.配置 Ceph 集群 dashboard** + +Ceph Dashboard 
是一个内置的基于 Web 的管理和监视应用程序,它是开源 Ceph 发行版的一部分。通过 Dashboard 可以获取 Ceph 集群的各种基本状态信息。 + +默认的 ceph 已经安装的 ceph-dashboard,其 SVC 地址是 service clusterIP,并不能被外部访问,需要创建 service 服务 + +```bash +$ kubectl apply -f dashboard-external-http.yaml +``` + +```bash +apiVersion: v1 +kind: Service +metadata: + name: rook-ceph-mgr-dashboard-external-https + namespace: rook-ceph # namespace:cluster + labels: + app: rook-ceph-mgr + rook_cluster: rook-ceph # namespace:cluster +spec: + ports: + - name: dashboard + port: 7000 + protocol: TCP + targetPort: 7000 + selector: + app: rook-ceph-mgr + rook_cluster: rook-ceph + sessionAffinity: None + type: NodePort +``` + +**说明**:由于 8443 是 https 访问端口需要配置证书,本教程只展示 http 访问 port 上只配置了 7000 + +**7.查看 svc 状态** + +```bash +$ kubectl get svc -n rook-ceph +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +csi-cephfsplugin-metrics ClusterIP 10.233.3.172 8080/TCP,8081/TCP 12d +csi-rbdplugin-metrics ClusterIP 10.233.43.23 8080/TCP,8081/TCP 12d +rook-ceph-mgr ClusterIP 10.233.63.85 9283/TCP 12d +rook-ceph-mgr-dashboard ClusterIP 10.233.20.159 7000/TCP 12d +rook-ceph-mgr-dashboard-external-https NodePort 10.233.56.73 7000:31357/TCP 12d +rook-ceph-mon-a ClusterIP 10.233.30.222 6789/TCP,3300/TCP 12d +rook-ceph-mon-b ClusterIP 10.233.55.25 6789/TCP,3300/TCP 12d +rook-ceph-mon-d ClusterIP 10.233.0.206 6789/TCP,3300/TCP 12d + +``` + +**8.验证访问 dashboard** + +打开 KubeSphere 平台开启外网服务 + +![](https://pek3b.qingstor.com/kubesphere-community/images/7ca284ce-49d9-4ba4-ba10-8ffbaacac98c.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/d9a616b7-44f6-4a5c-85c8-e37ac9e00576.png) + +访问方式: + +```bash +http://{master1-ip:31357} +``` + +用户名获取方法: + +```bash +$ kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}"|base64 --decode && echo +``` + +![](https://pek3b.qingstor.com/kubesphere-community/images/5f1f856f-6bf8-42ed-9d2a-c2ef1c88f8e8.png) + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/72082e58-7a68-4e07-80d7-34f05d34e03f.png) + +**说明**:dashboard 显示 HEALTH_WARN 警告可以通过 seelog 的方式查看具体的原因,一般是 osd down、pg 数量不够等 + +**9.部署 rook 工具箱** + +Rook 工具箱是一个包含用于 Rook 调试和测试的常用工具的容器 + +```bash +$ kubectl apply -f toolbox.yaml +``` + +进入工具箱查看 Ceph 集群状态 + +```bash +$ kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -- bash +``` + +```bash +$ ceph -s + cluster: + id: 1457045a-4926-411f-8be8-c7a958351a38 + health: HEALTH_WARN + mon a is low on available space + 2 osds down + Degraded data redundancy: 25/159 objects degraded (15.723%), 16 pgs degraded, 51 pgs undersized + 3 daemons have recently crashed + + services: + mon: 3 daemons, quorum a,b,d (age 9d) + mgr: a(active, since 4h) + mds: myfs:1 {0=myfs-b=up:active} 1 up:standby-replay + osd: 12 osds: 6 up (since 8d), 8 in (since 8d); 9 remapped pgs + + data: + pools: 5 pools, 129 pgs + objects: 53 objects, 37 MiB + usage: 6.8 GiB used, 293 GiB / 300 GiB avail + pgs: 25/159 objects degraded (15.723%) + 5/159 objects misplaced (3.145%) + 69 active+clean + 35 active+undersized + 16 active+undersized+degraded + 9 active+clean+remapped + +``` + +工具箱相关查询命令 + +```bash +ceph status +ceph osd status +ceph df +rados df +``` + +## 部署 StorageClass + +**1.rbd 块存储简介** + +Ceph 可以同时提供对象存储 RADOSGW、块存储 RBD、文件系统存储 Ceph FS。 RBD 即 RADOS Block Device 的简称,RBD 块存储是最稳定且最常用的存储类型。RBD 块设备类似磁盘可以被挂载。 RBD 块设备具有快照、多副本、克隆和一致性等特性,数据以条带化的方式存储在 Ceph 集群的多个 OSD 中。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/499b9863-ea8e-4244-b092-21a728833fe1.png) + +**2.创建 StorageClass** + +```bash +[root@kube-master1 rbd]# kubectl apply -f storageclass.yaml +``` + +**3.查看 StorageClass 部署状态** + +![](https://pek3b.qingstor.com/kubesphere-community/images/c47bf4ea-d371-4714-b07a-b3ecefeb40db.png) + +**4.创建 pvc** + +```bash +$ kubectl apply -f pvc.yaml +``` + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: 
+ name: rbd-pvc
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ storageClassName: rook-ceph-block
+~
+```
+
+**5.创建带有 pvc 的 pod**
+
+```bash
+$ kubectl apply -f pod.yaml
+```
+
+```bash
+apiVersion: v1
+kind: Pod
+metadata:
+ name: csirbd-demo-pod
+spec:
+ containers:
+ - name: web-server
+ image: nginx
+ volumeMounts:
+ - name: mypvc
+ mountPath: /var/lib/www/html
+ volumes:
+ - name: mypvc
+ persistentVolumeClaim:
+ claimName: rbd-pvc
+ readOnly: false
+
+```
+
+**6.查看 pod、pvc、pv 状态**
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/c47bf4ea-d371-4714-b07a-b3ecefeb40db.png)
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/b5695378-a271-4389-933f-4808d472fceb.png)
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/668ec96d-c099-4482-b0d2-73300b0803ef.png)
+
+## 总结
+
+对于首次接触 rook+Ceph 部署体验的同学来说需要了解的内容较多,遇到的坑也会比较的多。希望通过以上的部署过程记录可以帮助到大家。
+
+**1.Ceph 集群一直提示没有可 osd 的盘**
+
+答:这里遇到过几个情况,查看下挂载的数据盘是不是以前已经使用过虽然格式化了但是以前的 raid 信息还存在?可以使用以下脚本进行清理后再格式化再进行挂载。
+
+```bash
+#!/usr/bin/env bash
+DISK="/dev/vdc" #按需修改自己的盘符信息
+
+# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
+
+# You will have to run this step for all disks.
+sgdisk --zap-all $DISK
+
+# Clean hdds with dd
+dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
+
+# Clean disks such as ssd with blkdiscard instead of dd
+blkdiscard $DISK
+
+# These steps only have to be run once on each node
+# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
+ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove % + +# ceph-volume setup can leave ceph- directories in /dev and /dev/mapper (unnecessary clutter) +rm -rf /dev/ceph-* +rm -rf /dev/mapper/ceph--* + +# Inform the OS of partition table changes +partprobe $DISK +~ +``` + +**2.Ceph 支持哪些存储类型?** + +答:rdb 块存储、cephfs 文件存储、s3 对象存储等 + +**3.部署中出现各种坑应该怎么排查?** + +答:强烈建议通过 rook、ceph 官网去查看相关文档进行排错 + +- https://rook.github.io/docs/rook/ +- https://docs.ceph.com/en/pacific/ + +**4.访问 dashboard 失败** + +答:如果是公有云搭建的 KubeSphere 或 K8s 请把 nodeport 端口在安全组里放行即可 diff --git a/content/zh/blogs/serverless-way-for-kubernetes-log-alert.md b/content/zh/blogs/serverless-way-for-kubernetes-log-alert.md new file mode 100644 index 000000000..afab72728 --- /dev/null +++ b/content/zh/blogs/serverless-way-for-kubernetes-log-alert.md @@ -0,0 +1,426 @@ +--- +title: 'OpenFunction 应用系列之一: 以 Serverless 的方式实现 Kubernetes 日志告警' +tag: 'OpenFunction, KubeSphere, Kubernetes' +keywords: 'penFunction, Serverless, KubeSphere, Kubernetes, Kafka, FaaS, 无服务器' +description: '本文提供了一种基于 Serverless 的日志处理思路,可以在降低该任务链路成本的同时提高其灵活性。' +createTime: '2021-08-26' +author: '方阗' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202109031518797.png' +--- +## 概述 + +当我们将容器的日志收集到消息服务器之后,我们该如何处理这些日志?部署一个专用的日志处理工作负载可能会耗费多余的成本,而当日志体量骤增、骤降时亦难以评估日志处理工作负载的待机数量。本文提供了一种基于 Serverless 的日志处理思路,可以在降低该任务链路成本的同时提高其灵活性。 + +我们的大体设计是使用 Kafka 服务器作为日志的接收器,之后以输入 Kafka 服务器的日志作为事件,驱动 Serverless 工作负载对日志进行处理。据此的大致步骤为: + +1. 搭建 Kafka 服务器作为 Kubernetes 集群的日志接收器 +2. 部署 OpenFunction 为日志处理工作负载提供 Serverless 能力 +3. 编写日志处理函数,抓取特定的日志生成告警消息 +4. 
配置 [Notification Manager](https://github.com/kubesphere/notification-manager/) 将告警发送至 Slack + +![](https://pek3b.qingstor.com/kubesphere-community/images/202108261124546.png) + +在这个场景中,我们会利用到 [OpenFunction](https://github.com/OpenFunction/OpenFunction) 带来的 Serverless 能力。 + +> [OpenFunction](https://github.com/OpenFunction/OpenFunction) 是 KubeSphere 社区开源的一个 FaaS(Serverless)项目,旨在让用户专注于他们的业务逻辑,而不必关心底层运行环境和基础设施。该项目当前具备以下关键能力: +> +> - 支持通过 dockerfile 或 buildpacks 方式构建 OCI 镜像 +> - 支持使用 Knative Serving 或 OpenFunctionAsync ( KEDA + Dapr ) 作为 runtime 运行 Serverless 工作负载 +> - 自带事件驱动框架 + +## 使用 Kafka 作为日志接收器 + +首先,我们为 KubeSphere 平台开启 **logging** 组件(可以参考 [启用可插拔组件](https://kubesphere.io/zh/docs/pluggable-components/) 获取更多信息)。然后我们使用 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) 搭建一个最小化的 Kafka 服务器。 + +1. 在 default 命名空间中安装 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) : + + ```shell + helm repo add strimzi https://strimzi.io/charts/ + helm install kafka-operator -n default strimzi/strimzi-kafka-operator + ``` + +2. 运行以下命令在 default 命名空间中创建 Kafka 集群和 Kafka 主题,该命令所创建的 Kafka 和 Zookeeper 集群的存储类型为 **ephemeral**,使用 emptyDir 进行演示。 + + > 注意,我们此时创建了一个名为 “logs” 的 topic,后续会用到它 + + ```shell + cat < 如果您启用了[多集群功能](https://kubesphere.io/zh/docs/multicluster-management/),您可以选择一个集群。 + +2. 在**集群管理**页面,选择**集群设置**下的**日志收集**。 + +3. 点击**添加日志接收器**并选择 **Kafka**。输入 Kafka 代理地址和端口信息,然后点击**确定**继续。 + +![](https://i.imgur.com/RcIcQ3a.png) + + +4. 
运行以下命令验证 Kafka 集群是否能从 Fluent Bit 接收日志: + + ```shell + # 启动一个工具 pod + $ kubectl run utils --image=arunvelsriram/utils -i --tty --rm + # 检查 logs topic 中的日志情况 + $ kafkacat -C -b kafka-logs-receiver-kafka-0.kafka-logs-receiver-kafka-brokers.default.svc:9092 -t logs + ``` + +## 部署 OpenFunction + +按照概述中的设计,我们需要先部署 OpenFunction。OpenFunction 项目引用了很多第三方的项目,如 Knative、Tekton、ShipWright、Dapr、KEDA 等,手动安装较为繁琐,推荐使用 [Prerequisites 文档](https://github.com/OpenFunction/OpenFunction#prerequisites) 中的方法,一键部署 OpenFunction 的依赖组件。 + +> 其中 `--with-shipwright` 表示部署 shipwright 作为函数的构建驱动 +> `--with-openFuncAsync` 表示部署 OpenFuncAsync Runtime 作为函数的负载驱动 +> 而当你的网络在访问 Github 及 Google 受限时,可以加上 `--poor-network` 参数用于下载相关的组件 +```shell +sh hack/deploy.sh --with-shipwright --with-openFuncAsync --poor-network +``` + +部署 OpenFunction: + +> 此处选择安装最新的稳定版本,你也可以使用开发版本,参考 [Install 文档](https://github.com/OpenFunction/OpenFunction#install) +> +> 为了可以正常使用 ShipWright ,我们提供了默认的构建策略,可以使用以下命令设置该策略: +> +> ```shell +> kubectl apply -f https://raw.githubusercontent.com/OpenFunction/OpenFunction/main/config/strategy/openfunction.yaml +> ``` +```shell +kubectl apply -f https://github.com/OpenFunction/OpenFunction/releases/download/v0.3.0/bundle.yaml +``` + +## 编写日志处理函数 + +我们以 [创建并部署 WordPress](https://kubesphere.io/zh/docs/quick-start/wordpress-deployment/) 为例,搭建一个 WordPress 应用作为日志的生产者。该应用的工作负载所在的命名空间为 “demo-project”,Pod 名称为 “wordpress-v1-f54f697c5-hdn2z”。 + +当请求结果为 404 时,我们收到的日志内容如下: +```json +{"@timestamp":1629856477.226758,"log":"*.*.*.* - - [25/Aug/2021:01:54:36 +0000] \"GET /notfound HTTP/1.1\" 404 49923 \"-\" \"curl/7.58.0\"\n","time":"2021-08-25T01:54:37.226757612Z","kubernetes":{"pod_name":"wordpress-v1-f54f697c5-hdn2z","namespace_name":"demo-project","container_name":"container-nrdsp1","docker_id":"bb7b48e2883be0c05b22c04b1d1573729dd06223ae0b1676e33a4fac655958a5","container_image":"wordpress:4.8-apache"}} + +``` + +我们的需求是:当一个请求结果为 404 时,发送一个告警通知给接收器(可以根据 [配置 Slack 
通知](https://kubesphere.io/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack/) 配置一个 Slack 告警接收器),并记录命名空间、Pod 名称、请求路径、请求方法等信息。按照这个需求,我们编写一个简单的处理函数: +> 你可以从 [OpenFunction Context Spec](https://github.com/OpenFunction/functions-framework/blob/main/docs/OpenFunction-context-specs.md) 处了解 **openfunction-context** 的使用方法,这是 OpenFunction 提供给用户编写函数的工具库 +> 你可以通过 [OpenFunction Samples](https://github.com/OpenFunction/samples) 了解更多的 OpenFunction 函数案例 + +```go +package logshandler + +import ( + "encoding/json" + "fmt" + "log" + "regexp" + "time" + + ofctx "github.com/OpenFunction/functions-framework-go/openfunction-context" + alert "github.com/prometheus/alertmanager/template" +) + +const ( + HTTPCodeNotFound = "404" + Namespace = "demo-project" + PodName = "wordpress-v1-[A-Za-z0-9]{9}-[A-Za-z0-9]{5}" + AlertName = "404 Request" + Severity = "warning" +) + +// LogsHandler ctx 参数提供了用户函数在集群语境中的上下文句柄,如 ctx.SendTo 用于将数据发送至指定的目的地 +// LogsHandler in 参数用于将输入源中的数据(如有)以 bytes 的方式传递给函数 +func LogsHandler(ctx *ofctx.OpenFunctionContext, in []byte) int { + content := string(in) + // 这里我们设置了三个正则表达式,分别用于匹配 HTTP 返回码、资源命名空间、资源 Pod 名称 + matchHTTPCode, _ := regexp.MatchString(fmt.Sprintf(" %s ", HTTPCodeNotFound), content) + matchNamespace, _ := regexp.MatchString(fmt.Sprintf("namespace_name\":\"%s", Namespace), content) + matchPodName := regexp.MustCompile(fmt.Sprintf(`(%s)`, PodName)).FindStringSubmatch(content) + + if matchHTTPCode && matchNamespace && matchPodName != nil { + log.Printf("Match log - Content: %s", content) + + // 如果上述三个正则表达式同时命中,那么我们需要提取日志内容中的一些信息,用于填充至告警信息中 + // 这些信息为:404 请求的请求方式(HTTP Method)、请求路径(HTTP Path)以及 Pod 名称 + match := regexp.MustCompile(`([A-Z]+) (/\S*) HTTP`).FindStringSubmatch(content) + if match == nil { + return 500 + } + path := match[len(match)-1] + method := match[len(match)-2] + podName := matchPodName[len(matchPodName)-1] + + // 收集到关键信息后,我们使用 altermanager 的 Data 结构体组装告警信息 + notify := &alert.Data{ + Receiver: 
"notification_manager", + Status: "firing", + Alerts: alert.Alerts{}, + GroupLabels: alert.KV{"alertname": AlertName, "namespace": Namespace}, + CommonLabels: alert.KV{"alertname": AlertName, "namespace": Namespace, "severity": Severity}, + CommonAnnotations: alert.KV{}, + ExternalURL: "", + } + alt := alert.Alert{ + Status: "firing", + Labels: alert.KV{ + "alertname": AlertName, + "namespace": Namespace, + "severity": Severity, + "pod": podName, + "path": path, + "method": method, + }, + Annotations: alert.KV{}, + StartsAt: time.Now(), + EndsAt: time.Time{}, + GeneratorURL: "", + Fingerprint: "", + } + notify.Alerts = append(notify.Alerts, alt) + notifyBytes, _ := json.Marshal(notify) + + // 使用 ctx.SendTo 将内容发送给名为 "notification-manager" 的输出端(你可以在之后的函数配置 logs-handler-function.yaml 中找到它的定义) + if err := ctx.SendTo(notifyBytes, "notification-manager"); err != nil { + panic(err) + } + log.Printf("Send log to notification manager.") + } + return 200 +} + +``` + +我们将这个函数上传到代码仓库中,记录**代码仓库的地址**以及**代码在仓库中的目录路径**,在下面的创建函数步骤中我们将使用到这两个值。 +> 你可以在 [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/OpenFuncAsync/logs-handler-function) 中找到这个案例。 + +## 创建函数 + +接下来我们将使用 OpenFunction 构建上述的函数。首先设置一个用于访问镜像仓库的秘钥文件 **push-secret**(在使用代码构建出 OCI 镜像后,OpenFunction 会将该镜像上传到用户的镜像仓库中,用于后续的负载启动): + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +应用函数 **logs-handler-function.yaml**: + +> 函数定义中包含了对两个关键组件的使用: +> +> [Dapr](https://dapr.io/) 对应用程序屏蔽了复杂的中间件,使得 logs-handler 可以非常容易地处理 Kafka 中的事件 +> +> [KEDA](https://keda.sh/) 通过监控消息服务器中的事件流量来驱动 logs-handler 函数的启动,并且根据 Kafka 中消息的消费延时动态扩展 logs-handler 实例 + +```yaml +apiVersion: core.openfunction.io/v1alpha1 +kind: Function +metadata: + name: logs-handler +spec: + version: "v1.0.0" + # 这里定义了构建后的镜像的上传路径 + image: 
openfunctiondev/logs-async-handler:v1 + imageCredentials: + name: push-secret + build: + builder: openfunctiondev/go115-builder:v0.2.0 + env: + FUNC_NAME: "LogsHandler" + # 这里定义了源代码的路径 + # url 为上面提到的代码仓库地址 + # sourceSubPath 为代码在仓库中的目录路径 + srcRepo: + url: "https://github.com/OpenFunction/samples.git" + sourceSubPath: "functions/OpenFuncAsync/logs-handler-function/" + serving: + # OpenFuncAsync 是 OpenFunction 通过 KEDA+Dapr 实现的一种由事件驱动的异步函数运行时 + runtime: "OpenFuncAsync" + openFuncAsync: + # 此处定义了函数的输入(kafka-receiver)和输出(notification-manager),与下面 components 中的定义对应关联 + dapr: + inputs: + - name: kafka-receiver + type: bindings + outputs: + - name: notification-manager + type: bindings + params: + operation: "post" + type: "bindings" + annotations: + dapr.io/log-level: "debug" + # 这里完成了上述输入端和输出端的具体定义(即 Dapr Components) + components: + - name: kafka-receiver + type: bindings.kafka + version: v1 + metadata: + - name: brokers + value: "kafka-logs-receiver-kafka-brokers:9092" + - name: authRequired + value: "false" + - name: publishTopic + value: "logs" + - name: topics + value: "logs" + - name: consumerGroup + value: "logs-handler" + # 此处为 KubeSphere 的 notification-manager 地址 + - name: notification-manager + type: bindings.http + version: v1 + metadata: + - name: url + value: http://notification-manager-svc.kubesphere-monitoring-system.svc.cluster.local:19093/api/v2/alerts + keda: + scaledObject: + pollingInterval: 15 + minReplicaCount: 0 + maxReplicaCount: 10 + cooldownPeriod: 30 + # 这里定义了函数的触发器,即 Kafka 服务器的 “logs” topic + # 同时定义了消息堆积阈值(此处为 10),即当消息堆积量超过 10,logs-handler 实例个数就会自动扩展 + triggers: + - type: kafka + metadata: + topic: logs + bootstrapServers: kafka-logs-receiver-kafka-brokers.default.svc.cluster.local:9092 + consumerGroup: logs-handler + lagThreshold: "10" +``` + +## 结果演示 + +我们先关闭 Kafka 日志接收器:在**日志收集**页面,点击进入 Kafka 日志接收器详情页面,然后点击**更多操作**并选择**更改状态**,将其设置为**关闭**。 + +停用后一段时间,我们可以观察到 logs-handler 函数实例已经收缩到 0 了。 + +再将 Kafka 日志接收器**激活**,logs-handler 随之启动。 + +```shell +~# 
kubectl get po --watch +NAME READY STATUS RESTARTS AGE +kafka-logs-receiver-entity-operator-568957ff84-tdrrx 3/3 Running 0 7m27s +kafka-logs-receiver-kafka-0 1/1 Running 0 7m48s +kafka-logs-receiver-zookeeper-0 1/1 Running 0 8m12s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 34s +strimzi-cluster-operator-687fdd6f77-kc8cv 1/1 Running 0 10m +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 2/2 Terminating 0 36s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 37s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-b9d6f 0/2 Terminating 0 38s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 Pending 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 0s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 0/2 ContainerCreating 0 2s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 1/2 Running 0 4s +logs-handler-serving-kpngc-v100-zcj4q-5f46996f8c-9kj2c 2/2 Running 0 11s +``` + +接着我们向 WordPress 应用一个不存在的路径发起请求: + +```shell +curl http:///notfound +``` + +可以看到 Slack 中已经收到了这条消息(与之对比的是,当我们正常访问该 WordPress 站点时, Slack 中并不会收到告警消息): + +![](https://i.imgur.com/YQc5uOq.png) + +### 进一步探索 + +同步函数的解决方案: + +为了可以正常使用 Knative Serving ,我们需要设置其网关的负载均衡器地址。(你可以使用本机地址作为 workaround) + +```bash +# 将下面的 "1.2.3.4" 替换为实际场景中的地址。 +$ kubectl patch svc -n kourier-system kourier \ +-p '{"spec": {"type": "LoadBalancer", "externalIPs": ["1.2.3.4"]}}' + +$ kubectl patch configmap/config-domain -n knative-serving \ +-type merge --patch '{"data":{"1.2.3.4.sslip.io":""}}' +``` + +除了直接由 Kafka 服务器驱动函数运作(异步方式),OpenFunction 还支持使用自带的事件框架对接 Kafka 服务器,之后以 Sink 的方式驱动 Knative 函数运作。可以参考 [OpenFunction Samples](https://github.com/OpenFunction/samples/tree/main/functions/Knative/logs-handler-function) 中的案例。 + +在该方案中,同步函数的处理速度较之异步函数有所降低,当然我们同样可以借助 KEDA 来触发 Knative 
Serving 的 concurrency 机制,但总体而言缺乏异步函数的便捷性。(后续的阶段中我们会优化 OpenFunction 的事件框架来解决同步函数这方面的缺陷)
+
+由此可见,不同类型的 Serverless 函数有其擅长的任务场景,如一个有序的控制流函数就需要由同步函数而非异步函数来处理。
+
+## 综述
+
+Serverless 带来了我们所期望的对业务场景快速拆解重构的能力。
+
+如本案例所示,OpenFunction 不但以 Serverless 的方式提升了日志处理、告警通知链路的灵活度,还通过函数框架将通常对接 Kafka 时复杂的配置步骤简化为语义明确的代码逻辑。同时,我们也在不断演进 OpenFunction,将在之后版本中实现由自身的 Serverless 能力驱动自身的组件运作。
\ No newline at end of file
diff --git a/content/zh/blogs/transform-traditional-applications-into-microservices.md b/content/zh/blogs/transform-traditional-applications-into-microservices.md
new file mode 100644
index 000000000..82213575e
--- /dev/null
+++ b/content/zh/blogs/transform-traditional-applications-into-microservices.md
@@ -0,0 +1,224 @@
+---
+title: '将传统应用改造成微服务,启用流量治理功能'
+tag: 'KubeSphere, Kubernetes, 微服务'
+keywords: 'KubeSphere, Kubernetes, 微服务, 流量治理, 服务网格, Service Mesh'
+description: '本文将告诉你,如何将一个传统应用转化成微服务,从而来享受 Service Mesh 的各种功能,如“灰度发布”、“服务治理”、“流量拓扑”、Tracing 等功能。'
+createTime: '2021-07-07'
+author: 'Zackzhang'
+snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/traffic-management.png'
+---
+
+## 现状
+
+目前大多数用户,在使用 KubeSphere 微服务治理功能时,仅仅停留在部署 Bookinfo,“体验”一把微服务治理的功能而已。如果要完全使用微服务,仍然无法上手;更不知如何将传统服务改造成微服务。
+
+本文将告诉你,如何将一个传统应用转化成微服务,从而来享受 Service Mesh 的各种功能,如灰度发布、服务治理、流量拓扑、Tracing 等功能。
+
+## 介绍
+
+KubeSphere 微服务使用 Application CRD,将相关联的资源抽象成了一个具体的应用,使用 Istio Application 功能,实现微服务流量治理、灰度发布、Tracing 等功能。屏蔽了 Istio 复杂的 Destination Rule 及 Virtual Service 概念,能根据流量治理设置及灰度发布策略自动生成这些资源。
+
+使用 KubeSphere 微服务,需满足以下条件:
+
+1. Deployment 有 `app` `version` 这两个 label;Service 有 `app` Label;且 Deploy 与 service 的 App Label 一致,等于 Service Name(Istio 需要)
+
+2. 在一个应用内,所有资源需要有这两个标签 app.kubernetes.io/name=, app.kubernetes.io/version=(Application 需要)
+
+3. Deployment Name 为 Service Name 后面加 v1;如 Service 为 nginx, deployment 为 nginx-v1 (v3.0 及以前版本)
+
+4. Deployment Template 中有相应 Annotation (Istio Sidecar 自动注入需要)
+
+```bash
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+```
+
+5. 
Service/Deployment 有相应 Annotation (KubeSphere CRD Controller 会自动将 Service 同步为 Virtual Service/DestinationRules,CRD controller 需要) + +```bash +# Service +kind: Service +metadata: + annotations: + servicemesh.kubesphere.io/enabled: "true" + +# Deployment +kind: Deployment +metadata: + annotations: + servicemesh.kubesphere.io/enabled: "true" +``` + +## 示例说明 + +下面将首先创建 Wordpress + Mysql 两个单独的应用,功能正常后,将它们转化成 KubeSphere 微服务,并注入 Sidecar 实现流量治理等功能。 + +> 仅仅一个 Service 使用 Sidecar 功能是没有意义的,最少要有两个 Service 才能看到流量拓扑;因此这里使用两个最简单的服务来演示功能。 + +打开 [Mysql DockerHub](https://hub.docker.com/_/mysql "mysql dockerhub") 页面,可以看到设置 `MYSQL_ROOT_PASSWORD` 变量,可以设置默认密码。 + +打开 [Wordpress DockerHub](https://hub.docker.com/_/wordpress "wordpress dockerhub") 页面,可以看到可以数据库设置的三个变量 `WORDPRESS_DB_PASSWORD` `WORDPRESS_DB_USER` `WORDPRESS_DB_HOST`,通过设置这三个变量,让 Wordpress 连接到 Mysql 应用. + +## 创建传统的应用 + +首先创建 Workspace、Namespace, 且 Namespace 开启网关及流量治理功能。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607087691-192809-image.png) + +创建 Mysql。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607077690-536623-image.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607077768-489287-image.png) + +设置初始密码。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078188-805820-image.png) + +同理,创建一个 Stateless 的 Wordpress 服务。 +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078273-77957-image.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078333-189133-image.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078497-580755-image.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078555-252996-image.png) + +待 Pod 全部正常后,根据 Service NodePort 端口,直接访问页面,可以看到应用正常。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078616-495561-image.png) + +查看 Pod,未启用 Sidecare,每个 Pod 只有一个容器。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607078689-536270-image.png) + 
+由于没有使用微服务功能,无法看到流量的具体走向。下面将它转化成 Service Mesh 服务网格形式。 + +## 部署 Application 应用 + +1. apply 下面的 yaml,部署出一个 Application。 + +```bash +# wordpress.yaml +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + annotations: + kubesphere.io/creator: admin + servicemesh.kubesphere.io/enabled: "true" + labels: + app.kubernetes.io/name: wordpress-app + app.kubernetes.io/version: v1 + name: wordpress-app # 注意应用的 name 要跟 label 定义的一样:app.kubernetes.io/name +spec: + addOwnerRef: true + componentKinds: + - group: "" + kind: Service + - group: apps + kind: Deployment + - group: apps + kind: StatefulSet + - group: extensions + kind: Ingress + - group: servicemesh.kubesphere.io + kind: Strategy + - group: servicemesh.kubesphere.io + kind: ServicePolicy + selector: + matchLabels: + # 相关资源需要打上这两个label,表示归属关系 + app.kubernetes.io/name: wordpress-app + app.kubernetes.io/version: v1 +``` + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607079099-328486-image.png) + +可以看到现在这个应用状态是 0/0,表示没有关联任何应用。 + +> 如果看不到应用状态,且无法使用 `kubectl get app` 命令,说明你环境的 Application 的 CRD 较老,更新方法: + +```bash +kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/master/roles/common/files/ks-crds/app.k8s.io_applications.yaml +``` + +2. 给相关应用打上 Application Label,声明服务归属于该应用。 + +```bash +kubectl -n sample label deploy/wordpress-v1 app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +kubectl -n sample label svc/wordpress app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 + +kubectl -n sample label sts/mysql-v1 app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +kubectl -n sample label svc/wordpress app.kubernetes.io/name=wordpress-app app.kubernetes.io/version=v1 +``` + +此时检查 App,可以发现已经可以看到应用中的关联服务数量已经不为 0 了。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/1607087747-296599-image.png) + +3. 
Deployment/Service 打上相应的 Annotation
+
+```bash
+kubectl -n sample annotate svc/wordpress servicemesh.kubesphere.io/enabled="true"
+kubectl -n sample annotate deploy/wordpress-v1 servicemesh.kubesphere.io/enabled="true"
+kubectl -n sample annotate svc/mysql servicemesh.kubesphere.io/enabled="true"
+kubectl -n sample annotate sts/mysql-v1 servicemesh.kubesphere.io/enabled="true"
+```
+
+4. deploy/sts templates 中增加相应 Annotation,启用 Sidecar。
+
+```bash
+ kubectl -n sample edit deploy/wordpress-v1
+...
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true" # 添加该行
+
+kubectl -n sample edit sts/mysql-v1
+...
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true" # 添加该行
+```
+
+> 注意:只需要给 Template 中加 Annotations 就可以注入 Sidecar,无需给 Namespace 加`istio-injection=enabled`的 Label。
+
+检查已经注入 Sidecar。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607088879-407091-image.png)
+
+5. Istio 相关 Label、命名规则符合要求,如果是通过 KubeSphere 创建的服务,这两项默认支持,不用修改。
+
+即 App version 相关的 Label,如果是使用 KubeSphere 页面创建的服务,会默认添加上这两个 Label。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607088007-59417-image.png)
+
+## 检查
+
+至此,改造已经完成,检查页面。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607088980-251909-image.png)
+
+我们将 Wordpress 服务暴露出来。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607089069-627405-image.png)
+
+页面访问服务,可以看到应用正常。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607089124-868165-image.png)
+
+检查流量,发现正常,流量可视化已经可以看到数据。
+
+![](https://pek3b.qingstor.com/kubesphere-community/images/1607089512-821390-image.png)
+
+同理,灰度发布/Tracing 功能也正常。
+
+Tracing 功能需要启用 Ingress 才能看到效果。
+
+## 总结
+
+微服务改造按照上述步骤修改,即可顺利完成应用的改造,但是步骤仍然较为繁琐,KubeSphere 会继续优化功能,降低使用门槛。
diff --git a/content/zh/blogs/use-KubeKey-to-install-and-deploy-Kubernetes-and-kubeovn.md b/content/zh/blogs/use-KubeKey-to-install-and-deploy-Kubernetes-and-kubeovn.md
index c9184623b..0437b5645 100644
--- 
a/content/zh/blogs/use-KubeKey-to-install-and-deploy-Kubernetes-and-kubeovn.md +++ b/content/zh/blogs/use-KubeKey-to-install-and-deploy-Kubernetes-and-kubeovn.md @@ -51,7 +51,7 @@ chmod +x kk 创建示例配置文件: ```shell -./kk create cluster --with-kubernetes v1.17.9 +./kk create cluster --with-kubernetes v1.20.4 ``` 完整的文档请参考[官方文档](https://kubesphere.com.cn/docs/installing-on-linux/introduction/multioverview/) diff --git a/content/zh/blogs/use-apache-apisix-ingress-in-kubesphere.md b/content/zh/blogs/use-apache-apisix-ingress-in-kubesphere.md new file mode 100644 index 000000000..7780aaeed --- /dev/null +++ b/content/zh/blogs/use-apache-apisix-ingress-in-kubesphere.md @@ -0,0 +1,442 @@ +--- +title: '在 Kubernetes 中安装和使用 Apache APISIX Ingress 网关' +tag: 'KubeSphere, APISIX' +keyword: 'Kubernetes, KubeSphere, APISIX, Ingress, 网关, Service Monitor' +description: '本文以 Apache APISIX Ingress Controller 为例介绍如何通过 KubeSphere 快速为 Kubernetes 集群使用两种不同类型的网关,同时对它们的使用状态进行监控。' +createTime: '2021-11-25' +author: '张海立' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/202111301254175.png' +--- + +[KubeSphere 3.2.0 发布了!](https://kubesphere.com.cn/blogs/kubesphere-3.2.0-ga-announcement/)为项目网关增配了整套监控及管理页面,同时引入了集群网关来提供集群层面全局的 Ingress 网关能力。当然,我们还是可以部署使用第三方 Ingress Controller,本文将以 [Apache APISIX Ingress Controller](https://apisix.apache.org/docs/ingress-controller/getting-started/) 为例介绍如何通过 KubeSphere 快速为 Kubernetes 集群使用两种不同类型的网关,同时对它们的使用状态进行监控。 + +本文将分为以下几部分展开: + +- KubeSphere 项目网关的新管理界面的应用展示 +- 通过 KubeSphere 的应用管理能力快速使用 Apache APISIX Ingress Controller +- 利用 KubeSphere 的自定义监控能力获取 Apache APISIX 网关的运行指标 + +## 准备工作 + +### 安装 KubeSphere + +安装 KubeSphere 有两种方法。一是在 Linux 上直接安装,可以参考文档:[在 Linux 安装 KubeSphere](https://kubesphere.com.cn/docs/quick-start/all-in-one-on-linux/); 二是在已有 Kubernetes 中安装,可以参考文档:[在 Kubernetes 安装 KubeSphere](https://kubesphere.com.cn/docs/quick-start/minimal-kubesphere-on-k8s/)。 + +KubeSphere 最小化安装版本已经包含了监控模块,因此不需要额外启用,可以通过「系统组件」页面中的「监控」标签页确认安装状态。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/202111251132484.png) + +### 部署 httpbin 演示应用 + +由于需要演示网关的访问控制能力,我们必须要先有一个可以访问的应用作为网关的后台服务。这里我们使用 [httpbin.org](http://httpbin.org/) 提供的 [kennethreitz/httpbin](https://hub.docker.com/r/kennethreitz/httpbin/) 容器应用作为演示应用。 + +在 KubeSphere 中,我们可以先创建新的项目或使用已有的项目,进入项目页面后,选择「应用负载」下的「服务」直接创建无状态工作负载并生成配套的服务。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251132702.png) + +使用 [kennethreitz/httpbin](https://hub.docker.com/r/kennethreitz/httpbin/) 容器默认的 `80` 端口作为服务端口,创建完成后确保在「工作负载」和「服务」页面下都可以看到 `httpbin` 的对应条目,如下图所示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251133444.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251133780.png) + + +## 项目网关的新面貌 + +[项目网关](https://kubesphere.com.cn/docs/project-administration/project-gateway/) 是 KubeSphere 3.0 以来就有的功能:“KubeSphere 项目中的网关是一个 [NGINX Ingress 控制器](https://www.nginx.com/products/nginx-ingress-controller/)。KubeSphere 内置的用于 HTTP 负载均衡的机制称为 [应用路由](https://kubesphere.com.cn/docs/project-user-guide/application-workloads/routes/),它定义了从外部到集群服务的连接规则。如需允许从外部访问服务,用户可创建路由资源来定义 URI 路径、后端服务名称等信息。” + +下面我们首先进入已部署了 httpbin 服务的项目,在「项目设置」中打开「网关设置」页面,然后执行「开启网关」操作。方便起见,直接选择 `NodePort` 作为「访问方式」即可。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251135346.png) + +确定后回到网关页面,稍等片刻后刷新页面,可以得到如下图这样的部署完成状态,可以看到 NodePort 默认被赋予了两个节点端口。下面我们通过右上角的「管理」按钮「查看详情」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251135390.png) + +此时我们看到的便是 3.2.0 新带来的项目/集群网关的新监控页面!但是现在显然是没有数据的,因为我们还没有任何流量从网关产生。那么下面我们就需要为 httpbin 服务创建应用路由。 + +从「应用负载」进入「应用路由」页面,开始「创建」路由。为路由取名为 `httpbin` 后,我们指定一个方便测试的域名,并设置「路径」为 `/`, 选择「服务」`httpbin` 和「端口」`80`。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251135908.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251135500.png) + + +直接下一步跳过高级设置后完成路由创建,可以得到如下图这样的一条新的 `httpbin` 应用路由项。 + 
+![](https://pek3b.qingstor.com/kubesphere-community/images/202111251136488.png) + +下面我们可以通过项目网关的 NodePort 地址及指定的域名(如这里是 `http://httpbin.ui:32516`)来访问 httpbin 应用服务,随意刷新或操作一下页面的请求生成功能,再进入网关的详情页面,便可以看到在「监控」面板上已经出现了网关的一些内置的监控指标展示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251136470.png) + + +### 为网关指定 NodePort 节点端口 + +对于公有云环境,如果使用 NodePort 方式向外暴露访问能力,开放端口通常是有限且受控的,因此对于网关所使用的 NodePort 我们需要能够对它进行修改。 + +由于网关是被 KubeSphere 统一管理的,要修改网关服务的 NodePort,需要具备访问 `kubesphere-controls-system` 项目的权限。进入该项目后,通过「应用负载」的「服务」页面即可找到命名为 `kubesphere-router-` 形式且外部访问已开放 NodePort 的网关服务。NodePort 服务端口需要通过「编辑 YAML」来直接修改。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251136481.png) + + +## 开始使用集群网关 + +> 在 KubeSphere 3.1 中只支持项目级别的网关,如果用户的项目过多,势必会造成资源的浪费。而且不同的企业空间中的网关都是相互独立的。 +> +> KubeSphere 3.2.0 开始支持集群级别的全局网关,所有项目可共用同一个网关,之前已创建的项目网关也不会受到集群网关的影响。也可以统一纳管所有项目的网关,对其进行集中管理和配置,管理员用户再也不需要切换到不同的企业空间中去配置网关了。 + +进入 KubeSphere 3.2.0 版本之后,我们更推荐大家使用集群网关的功能来统一整个集群的应用路由。要启用集群网关其实也非常简单:使用具备集群管理权限的账号,进入其可管理的某个集群(如我们这里以 `default` 集群为例),在「集群设置」的「网关设置」中即可「开启网关」,同时查看「项目网关」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251136424.png) + +集群网关开启的方式以及对其 NodePort 访问端口的修改和之前项目网关的操作基本上是完全一样的,所以这里对过程就不做过多赘述了。 + +**⚠️ 有一点需要特别注意的是**:集群网关开启后,已经开启的项目网关还会保留;但尚未创建网关的项目是无法再创建单独的网关的,会直接使用集群网关。 + +下图展示了已创建网关的项目,在同时拥有项目及集群网关后,在「网关设置」页面所呈现的所有网关概览。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251137447.png) + + +## 快速使用 Apache APISIX Ingress Controller + +> Apache APISIX 是一款开源的高性能、动态云原生网关,由深圳支流科技有限公司于 2019 年捐赠给 Apache 基金会,当前已经成为 Apache 基金会的顶级开源项目,也是 GitHub 上最活跃的网关项目。Apache APISIX 当前已经覆盖了 API 网关,LB,Kubernetes Ingress,Service Mesh 等多种场景。 + +社区之前也介绍过如何 [使用 Apache APISIX 作为 Kubernetes 的 Ingress Controller](https://kubesphere.com.cn/blogs/kubesphere-apacheapisix/),本文将更多侧重介绍前文未涉及之细节,并结合 KubeSphere 的一些新功能加以具像化。 + + +### 部署 Apache APISIX Ingress Controller + +首先还是先要添加 Apache APISIX Helm Chart 
仓库,推荐用这种自管理的方式来保障仓库内容是得到及时同步的。我们选定一个企业空间后,通过「应用管理」下面的「应用仓库」来添加如下一个 Apache APISIX 的仓库(仓库 URL:`https://charts.apiseven.com`)。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251137442.png) + + +接下来我们创建一个名为 `apisix-system` 的项目。进入项目页面后,选择在「应用负载」中创建「应用」的方式来部署 Apache APISIX,并选择 `apisix` 应用模版开始进行部署。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251137587.png) + +> 为何是部署 Apache APISIX 应用的 Helm Chart,而不是直接部署 Apache APISIX Ingress Controller?
    +> +> 这是因为 Apache APISIX Ingress Controller 目前和 Apache APISIX 网关是强关联的(如下图所示),且目前通过 Apache APISIX Helm Charts 同时部署 Apache APISIX Gateway + Dashboard + Ingress Controller 是最方便的,因此本文推荐直接使用 Apache APISIX 的 Helm Chart 进行整套组件的部署。 +> +> ![](https://pek3b.qingstor.com/kubesphere-community/images/202111251137250.png) + +将应用命名为 `apisix` 以避免多个组件(Gateway, Dashboard, Ingress Controller)的工作负载及服务名称产生不匹配的情况;在安装步骤中编辑的「应用设置」的部分,请参照以下配置进行填写(**请特别注意带有【注意】标记的注释部分的说明,其余可以按需自行编辑修改**)。 + +```yaml +global: + imagePullSecrets: [] + +apisix: + enabled: true + customLuaSharedDicts: [] + image: + repository: apache/apisix + pullPolicy: IfNotPresent + tag: 2.10.1-alpine + replicaCount: 1 + podAnnotations: {} + podSecurityContext: {} + securityContext: {} + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + podAntiAffinity: + enabled: false + +nameOverride: '' +fullnameOverride: '' + +gateway: + type: NodePort + externalTrafficPolicy: Cluster + http: + enabled: true + servicePort: 80 + containerPort: 9080 + tls: + enabled: false + servicePort: 443 + containerPort: 9443 + existingCASecret: '' + certCAFilename: '' + http2: + enabled: true + stream: + enabled: false + only: false + tcp: [] + udp: [] + ingress: + enabled: false + annotations: {} + hosts: + - host: apisix.local + paths: [] + tls: [] + +admin: + enabled: true + type: ClusterIP + externalIPs: [] + port: 9180 + servicePort: 9180 + cors: true + credentials: + admin: edd1c9f034335f136f87ad84b625c8f1 + viewer: 4054f7cf07e344346cd3f287985e76a2 + allow: + ipList: + - 0.0.0.0/0 + +plugins: + - api-breaker + - authz-keycloak + - basic-auth + - batch-requests + - consumer-restriction + - cors + - echo + - fault-injection + - grpc-transcode + - hmac-auth + - http-logger + - ip-restriction + - ua-restriction + - jwt-auth + - kafka-logger + - key-auth + - limit-conn + - limit-count + - limit-req + - node-status + - openid-connect + - authz-casbin + - prometheus + - proxy-cache + - proxy-mirror + - proxy-rewrite + - redirect + 
- referer-restriction + - request-id + - request-validation + - response-rewrite + - serverless-post-function + - serverless-pre-function + - sls-logger + - syslog + - tcp-logger + - udp-logger + - uri-blocker + - wolf-rbac + - zipkin + - traffic-split + - gzip + - real-ip + #【注意】添加此插件以配合 Dashboard 展示服务信息 + - server-info + +stream_plugins: + - mqtt-proxy + - ip-restriction + - limit-conn + +customPlugins: + enabled: true + luaPath: /opts/custom_plugins/?.lua + #【注意】如下配置保障 Prometheus 插件可对外暴露指标 + plugins: + - name: prometheus + attrs: + export_addr: + ip: 0.0.0.0 + port: 9091 + configMap: + name: prometheus + mounts: [] + +dns: + resolvers: + - 127.0.0.1 + - 172.20.0.10 + - 114.114.114.114 + - 223.5.5.5 + - 1.1.1.1 + - 8.8.8.8 + validity: 30 + timeout: 5 + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + +configurationSnippet: + main: '' + httpStart: '' + httpEnd: '' + httpSrv: '' + httpAdmin: '' + stream: '' + +etcd: + enabled: true + host: + - 'http://etcd.host:2379' + prefix: /apisix + timeout: 30 + auth: + rbac: + enabled: false + user: '' + password: '' + tls: + enabled: false + existingSecret: '' + certFilename: '' + certKeyFilename: '' + verify: true + service: + port: 2379 + replicaCount: 3 + +dashboard: + enabled: true + #【注意】为 Dashboard 开启 NodePort 方便后续使用 + service: + type: NodePort + +ingress-controller: + enabled: true + config: + apisix: + #【注意】一定要设置 gateway 所在的 namespace + serviceNamespace: apisix-system + serviceMonitor: + enabled: true + namespace: 'apisix-system' + interval: 15s +``` + +部署成功后,点击应用名称进入详情页面,可以在「资源状态」标签页下看到如下的服务部署和工作状态运行状态展示。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251139749.png) + +> 💡 Apache APISIX 项目另有的两个 Helm Chart 对应的默认配置参数可以分别参考:[Dashboard](https://github.com/apache/apisix-helm-chart/blob/master/charts/apisix-dashboard/values.yaml) 和 [Ingress 
Controller](https://github.com/apache/apisix-helm-chart/blob/master/charts/apisix-ingress-controller/values.yaml) 的 `values.yaml`。 + +### 使用 Apache APISIX Dashboard 了解系统信息 + +Apache APISIX 应用部署完成后,首先我们通过 Apache APISIX Dashboard 来检验一下 Apache APISIX 网关的当前状态。从「应用负载」的「服务」页面,我们可以找到 `apisix-dashboard` 的服务,由于我们在应用配置中已经为 Dashboard 开启了 NodePort,所以这里我们可以直接通过 NodePort 端口来访问 Dashboard。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251139717.png) + + +使用默认的用户名及密码 `admin` 登录 Apache APISIX Dashboard,可以进入「系统信息」页面即可查看到我们当前连接管理的「Apache APISIX 节点」的信息。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251139089.png) + +### 使用 Apache APISIX Ingress Controller + +让我们回到「应用路由」页面,另外新建一个路由(如 `apisix-httpbin`),设置路径为 `/*` `httpbin` `80` 并为其添加 `kubernetes.io/ingress.class`: `apisix` 的键值。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251139013.png) + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251140083.png) + +创建完成后如何验证应用路由生效呢?首先,我们可以回到 Apache APISIX Dashboard,进入「路由」页面,可以看到新建的应用路由已经被 Apache APISIX Ingress Controller 识别之后自动添加到了 Apache APISIX 网关中,在「上游」页面也可以看到自动创建的一个上游条目。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251140841.png) + + +然后我们回到 `apisix-system` 项目的「服务」页面,找到 `apisix-gateway` 服务对应的端口,由此访问 `:`(例如此处为 `httpbin.ui:30408`)即可访问到 `apisix-httpbin` 应用路由所关联的后台服务。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251140229.png) + + +## 自定义监控 Apache APISIX 网关 + +Apache APISIX 网关可用之后其实是缺少像原生集群或项目网关这样自带的状态监控能力的,但这个我们也可以通过 Apache APISIX 的 Prometheus 插件以及 KubeSphere 自带的自定义监控能力来弥补。 + +### 暴露 Apache APISIX 网关的 Prometheus 监控指标 + +由于我们在部署 Apache APISIX 应用时已经开启了 [Prometheus 插件](https://apisix.apache.org/docs/apisix/plugins/prometheus),所以这里我们只需要把 Prometheus 监控指标的接口暴露出来即可。进入 `apisix-system` 项目,在「工作负载」页面找到 `apisix` 并进入部署详情页面,随后在左侧操作面板的「更多操作」中选择「编辑设置」。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251140256.png) + + +在弹出的「编辑设置」面板中,进入到 `apisix` 
容器的编辑界面,找到「端口设置」,添加一个新的名为 `prom` 的端口映射到容器的 `9091` 端口,保存后 `apisix` 工作负载会重启。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251141636.png) + + +### 为 Apache APISIX 网关监控指标创建 ServiceMonitor + +下面我们需要将已暴露的指标接口接入到 KubeSphere 自带的 Prometheus 中使之可被访问(被抓取指标数据),由于 KubeSphere 是通过 [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) 来维护内部的 Prometheus 系统的,所以最方便的方式自然是直接创建一个 ServiceMonitor 资源来实现指标接口的接入。 + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: apisix + namespace: apisix-system +spec: + endpoints: + - scheme: http + #【注意】使用上一步中工作负载暴露的容器端口名称 + targetPort: prom + #【注意】需要正确绑定 apisix 对应的指标接口路径 + path: /apisix/prometheus/metrics + interval: 15s + namespaceSelector: + matchNames: + - apisix-system + selector: + matchLabels: + app.kubernetes.io/name: apisix + app.kubernetes.io/version: 2.10.0 + helm.sh/chart: apisix-0.7.2 + +``` + +使用 `kubectl apply -f your_service_monitor.yaml` 创建这个 ServiceMonitor 资源。创建成功后,如果有集群管理权限,也可以在集群的 CRD 管理页面中搜索查看 ServiceMonitor 资源并找到名为 `apisix` 的自定义资源,也可以在这里做后续的 YAML 修改。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251141035.png) + + +### 将 Apache APISIX 网关指标接入自定义监控面板 + +下面我们在项目左侧菜单列表中找到「监控告警」中的「自定义监控」,开始「创建」自定义监控面板。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251141680.png) + +在弹出窗口中填入「名称」,选择「自定义」监控模版,并进入「下一步」的监控面板创建。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251141043.png) + + +进入编辑页面后先在左侧点击 `+` 区域,在右侧的「数据」区域进行 Prometheus 监控指标的配置,例如这里我们可以用 `sum(apisix_nginx_http_current_connections)` 来统计 Apache APISIX 网关实时的连接总数。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251142792.png) + + +保存后在页面右下角找到「+ 添加监控项」,我们选择「折线图」来创建一个 `Nginx connection state` 指标:使用 `sum(apisix_nginx_http_current_connections) by (state)` 作为指标、`{{state}}` 用作图例名称、选择「图例类型」为堆叠图,即可得到类似如下的图表显示效果。保存模版后即可得到您的第一个自定义监控面板!
+ +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251142266.png) + +> Apache APISIX 网关目前提供的 Prometheus 指标可以参见官方文档的 [可有的指标](https://apisix.apache.org/zh/docs/apisix/plugins/prometheus/#%E5%8F%AF%E6%9C%89%E7%9A%84%E6%8C%87%E6%A0%87) 部分。 + + +由于指标配置起来还是比较麻烦的,推荐在集群层面的「自定义监控」中直接导入 [Apache APISIX Grafana 模版](https://grafana.com/grafana/dashboards/11719)(下载 JSON 通过「本地上传」进行导入)。 + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251142917.png) + +创建完成后可以直接得到一个非常丰富的 Apache APISIX 网关监控面板。KubeSphere 也同时在 [积极推进](https://github.com/kubesphere/kubesphere/issues/4433) 将 Grafana 模版导入的功能引入到项目的自定义监控能力中去,敬请期待! + +![](https://pek3b.qingstor.com/kubesphere-community/images/202111251142354.png) + +至此,我们了解了 KubeSphere 3.2.0 中新的项目及集群网关的更丰富的状态信息展示能力;同时也完成了 Apache APISIX Ingress 网关接入 KubeSphere 并对其使用自定义监控。让我们开启 KubeSphere 应用网关的奇妙旅程吧~ \ No newline at end of file diff --git a/content/zh/blogs/x509-certificate-exporter.md b/content/zh/blogs/x509-certificate-exporter.md index 8373db9a3..6729462e9 100644 --- a/content/zh/blogs/x509-certificate-exporter.md +++ b/content/zh/blogs/x509-certificate-exporter.md @@ -32,7 +32,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 ### 创建帐户 -安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个帐户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个帐户 `user-manager`,然后使用 `user-manager` 创建新帐户。 +安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个用户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个用户 `user-manager`,然后使用 `user-manager` 创建新帐户。 1. 
以 `admin` 身份使用默认帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。 @@ -42,7 +42,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 ![](https://pek3b.qingstor.com/kubesphere-community/images/20210602121105.png) - 在**帐户角色**中,有如下所示四个可用的内置角色。接下来要创建的第一个帐户将被分配 `users-manager` 角色。 + 在**帐户角色**中,有如下所示四个可用的内置角色。接下来要创建的第一个用户将被分配 `users-manager` 角色。 | 内置角色 | 描述 | | -------------------- | ------------------------------------------------------------ | diff --git a/content/zh/case/_index.md b/content/zh/case/_index.md index 9ecd1dbdc..772d52e65 100644 --- a/content/zh/case/_index.md +++ b/content/zh/case/_index.md @@ -47,6 +47,22 @@ section2: - icon: "images/case/chinamobile-iot.png" content: "中移物联网有限公司是中国移动通信集团有限公司的全资子公司,是中国移动在物联网领域的主责企业。" link: "chinamobile-iot/" + + - icon: "images/case/uisee.png" + content: "驭势科技 UISEE 是中国领先的自动驾驶公司,致力于为全行业、全场景提供 AI 驾驶服务,交付赋能出行和物流新生态的 AI 驾驶员。" + link: "uisee/" + + - icon: "images/case/logo-qunar.png" + content: "去哪儿网(Qunar.com)是中国领先的在线旅游平台,创立于 2005 年 5 月,总部位于北京。" + link: "qunar/" + + - icon: "images/case/segmentfault-logo.png" + content: "SegmentFault 思否是国内领先的新一代开发者社区和技术媒体,是中国最大的 Hackathon 组织者,目前已覆盖和服务上千万中国软件开发者和 IT 信息从业者。" + link: "segmentfault/" + + - icon: "images/case/logo-msxf.png" + content: "马上消费金融股份有限公司(简称“马上消费”)是一家经中国银保监会批准,持有消费金融牌照的科技驱动型金融机构。" + link: "msxf/" section3: title: 'KubeSphere 助力各行各业' diff --git a/content/zh/case/msxf.md b/content/zh/case/msxf.md new file mode 100644 index 000000000..416643457 --- /dev/null +++ b/content/zh/case/msxf.md @@ -0,0 +1,128 @@ +--- +title: MSXF +description: + +css: scss/case-detail.scss + +section1: + title: 马上消费金融 + content: 马上消费金融股份有限公司(简称“马上消费”)是一家经中国银保监会批准,持有消费金融牌照的科技驱动型金融机构。 + +section2: + listLeft: + - title: 公司简介 + contentList: + - content: 马上消费金融股份有限公司(简称“马上消费”)是一家经中国银保监会批准,持有消费金融牌照的科技驱动型金融机构。截止 2020 年底,注册资本金达 40 亿元,注册用户已突破 1.2 亿,累计发放贷款超过 5400 亿元,累计纳税近 33 亿元,公司技术团队人数超过 1000 人。 + image: + + - title: 什么是 AI 中台? 
+ contentList: + - content: 我们的技术类部门架构大致如下: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-1.png + - title: + contentList: + - content: 可以看到 AI 中台团队隶属于“人工智能研究院”大部门下,与负责“云平台”的技术部中间有一个很高的部门墙。也因此,AI 中台所需要的底层云计算相关技术并不能很好的依赖于技术部,两边有不同的考核机制、目标、痛点,所以 AI 中台团队需要自己搭建底层云平台,这也是我们引入 KubeSphere 的一个重要原因。 + - content: 我们这边主要开发的产品如下,AI 中台是作为三大中台之一,在公司内部运行在金融云之上。但是由于 AI 中台需要考虑对外输出,而金融云暂时没有这个规划,所以 AI 中台也需要独立的云方面的解决方案,换言之 AI 中台本身必须是一个完整的容器云 + AI 架构。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-2.png + - title: + contentList: + - content: 目前产品主页大致长这样: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-3.png + - title: + contentList: + - content: 首页主要展示的是监控相关信息,这些都来自 Promethues。另外从左边可以看到我们的九大功能模块:数据中心、在线标注、项目开发、算法管理、训练任务、模型发布、模型 AB、应用管理等。监控信息相对来说还是比较粗糙,上面三个圈部分是集群纬度的整体信息,包括 CPU、内存、GPU 整体信息,下面是机器纬度、应用纬度、使用人纬度分别的汇总信息。另外我们也保留了原生的监控页面: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-4.png + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-5.png + - title: + contentList: + - content: 目前 grafana 社区并没有一个合适的 GPU 纬度展示模板,NVIDIA 也只给了一个主机纬度的相对粗糙的 Dashboard。目前我们用的 GPU Dashboard 是自己开发的。还有一个调用链维度的监控: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-6.png + - title: + contentList: + - content: 另外日志我们也是用的原生 kibana 来展示,对应的工具链是 Fluent Bit + Elasticsearch + Kibana。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-7.png + - title: + contentList: + - content: 日志这里可以看到一个额外的信息,我们可以根据 APP 纬度来聚合,也就是一个应用下的不同 Pod 产生的日志可以汇总展示。这里其实是简单地根据 Pod 的 label 来实现的,将每个 Pod 打上应用相关的 Label 信息,然后采集日志时将这个属性暴露出来,就能在展示时针对性汇总。在中台发布的应用有一个日志跳转按钮,转到 Kibana 页面后会带上相关参数,实现该应用下全部日志聚合展示的功能。 + - content: 到这里可以看到整个中台虽然看起来功能还算齐全,但是面板很多,日志监控和主页分别有各自的入口,虽然可以在主页跳转到日志和监控页面,但是这里的鉴权问题、风格统一问题等已经很不和谐。但是我们团队主打的是 AI 能力,人手也有限,没有太多的精力投入到统一 Dashboard 开发上,日志监控等虽然必不可少,但也不是核心能力。这也是引入 KubeSphere 的一个重要原因。后面还会详细谈到为什么引入 KubeSphere。 + - content: 整个中台的底层架构如下图。整个中台构建在 Kubernetes 之上,在引入 KubeSphere 
之前大致长这样,三主多从。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-8.png + - title: + contentList: + - content: 另外在网络上我们做了三网隔离支持,也就是业务、管理、存储可以分别使用不同的网卡,假如用户现场有多张网卡。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-9.png + + - title: 为什么需要 KubeSphere? + contentList: + - specialContent: + text: 使用 Kubernetes 面临的问题与挑战 + level: 3 + - content: 学习成本高:Kubernetes 引入了诸多新概念,要掌握 Kubernetes 达到生产落地的能力需要不少的学习时间,这里还会涉及到网络、存储、系统等方方面面知识,不是随便一个初级开发人员花时间就能掌握的。 + - content: 安装部署复杂:目前虽然已经有了 kubeadm 等一系列半自动化工具,可以接近一键部署环境,但是要搭建高可用生产集群,还是需要花不少精力深入掌握工具的各种配置细节,才能很好落地应用。 + - content: 功能组件选型复杂:要落地一套容器云并不是部署 Kubernetes 就够了,这里还有日志、监控、服务网格、存储等一系列相关组件需要落地实施,每一个方向都是涉及一系列可选方案,需要专门投入人力去学习、选型。 + - content: 隐形成本高:就算部署了 Kubernetes,后期的日常运维也需要专业的团队,对于一般中小公司来说一个 Kubernetes 运维团队的人力成本也是不小的开支,很多时候花钱还招不到合适的人,往往会陷入部署了 Kubernetes,但是出问题无人能解决的尴尬境地,通过重装来恢复环境。 + - content: 多租户模式实现复杂,安全性低:在 Kubernetes 里只有简单的 Namespace 隔离,配合 Quota 等一定程度上实现资源隔离,但是要 to C 应用还远远不够,很多时候我们需要开发一套权限管理系统来适配企业内专有的账号权限管理系统来对接,成本很高。 + - content: 缺少本土化支持:Kubernetes 一定程度上可以称为云操作系统,类比于 Linux,其实 Kubernets 更像是 kernel,我们要完整使用容器云能力,要在 Kubernets 之上附加很多的开源组件,就像 kernel 上要加很多的开源软件才能用起来 Linux 一样。很多企业,尤其是国企,会选择购买 Red Hat 等来享受企业级支持,专注于系统提供的能力本身,而不想投入太多的人力去掌握和运维系统本身。Kubernetes 本身也有这样的问题,很多企业并不希望额外投入太大的成本去使用这套解决方案,而是希望有一个类似 Redhat 系统的 Kubernetes 版本来简单化落地,而且希望免费。 + image: + - title: + contentList: + - specialContent: + text: AI 中台所面临的技术与挑战 + level: 3 + - content: 我们涉及的技术栈很广,AI 方向的,云计算方向的,还有工程开发的,也就是 Java + 前端等。但是我们的人力很稀缺,在云方向只有 2 个人,除了我之外另外一个同事擅长 IaaS 方向,在网络、存储等领域可以很好 cover 住。所以剩下的容器方向、监控日志等方向,在大公司可能每个方向一个团队,加一起大几十号人做的事情,这边只有我一个人了。所以我再有想法,有限的时间内也做不完一个平台。所以我也在寻找一个现成的解决方案,可以把自己解放出来,能够把精力投入到 AI 相关能力的建设上,比如模型训练等的 Operator 开发上,而不是整体研究日志监控组件和 Kubernetes 最佳部署实践等。 + - content: KubeSphere 提供的统一门户、多租户、多场景整体化解决方案正好能解决我的很多痛点。KubeSphere 的架构大致如下。不同于 OpenShift 的解决方案,KubeSphere 对 Kubernetes 没有侵入,而是基于 Operator 模式来拓展。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-13.png + + - type: 1 + contentList: + - 
content: 提供了统一门户 + - content: 多租户管理 + - content: 简化了安装部署流程 + + - title: KubeSphere 的引入 + contentList: + - content: KubeSphere 页面如下: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-16.png + - title: + contentList: + - content: 在 KubeSphere 里可以看到一个叫做 kubesphere-system/ks-installer 的资源,简写 cc,全称是 ClusterConfiguration,里面维护了集群的配置信息。我们在 ks-installer 里可以看到一个 ks-hook 配置,里面定义了 kind ClusterConfiguration,event add update,objectName ks-install,namespace kubesphere-system 等信息,这里也就是告诉 shell-operator 当 cc 发生变更的时候要触发相关代码执行。ks-installer 的核心原理是利用 shell-operator 来监听 cc 资源的变更,然后运行集群部署流程。 + - content: 每次 cc 发生 Add / Update 后,就会触发 installerRunner.py 运行,核心逻辑是: + - content: 1. 更新 cc (patch 掉环境升级场景下存量 cc 和新版 cc 结构上的差异) + - content: 2. 生成配置(将 cc 的 spec 和 status 存到本地,从而 installer 可以从 spec 中知道当前期望做什么,从 status 中可以知道集群当前状态,不需要做什么) + - content: 3. 执行前置部署流程(K8s 版本检查、ks-core 等不可或缺组件部署等) + - content: 4. 可选模块部署(并发执行剩余各个模块的部署流程) + image: + - title: + contentList: + - content: 然后再看下为什么配置里的变量可以被 ansible 识别,如下所示,在 env 里指定了 ks-config.json 和 ks-status.json 两个文件,ks-installer 运行的时候会将 cc 的 spec 和 status 分别存到这两个文件里,这样 ansible 执行的时候就可以获取到集群的期望状态和实际状态了。 + - content: 每个 playbook 的入口逻辑都在 main.yaml 里,所以接着大家可以在每个模块里通过 main.yaml 来具体研究每个模块的部署流程,串在一起也就知道了整个 KubeSphere 是怎么部署起来的了。 + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-19.png + - title: + contentList: + - content: 然后 KubeSphere 和中台本身的一堆组件怎么一起部署呢?我们也参考 KubeSphere 的部署模式,加了一个 mail-installer 的 cc,然后按照下面流程来完成整个中台的部署: + image: https://pek3b.qingstor.com/kubesphere-community/images/cic-msxf-20.png + + - type: 2 + content: 'KubeSphere 提供了统一门户的整体化解决方案,降低了平台整体维护成本。' + author: '马上消费金融' + + - title: 总结 + contentList: + - content: KubeSphere 提供了统一的门户、多租户支持、多场景整体化解决方案且易于集成,很好解决了落地 Kubernetes 所面临的各种痛点问题,能够有效降低人力资源成本,提升实施效率,实现业务价值最大化。 + image: + + rightPart: + icon: /images/case/logo-msxf.png + list: + - title: 行业 + content: 消费金融 + - title: 地点 + content: 中国 + - title: 云类型 + content: 私有云 + - title: 挑战 + content: 
安装部署复杂、学习成本和隐形成本高、功能组件选型复杂、缺乏本土化支持 + - title: 采用功能 + content: 日志、监控 + +--- diff --git a/content/zh/case/qunar.md b/content/zh/case/qunar.md new file mode 100644 index 000000000..1aa388f2c --- /dev/null +++ b/content/zh/case/qunar.md @@ -0,0 +1,190 @@ +--- +title: qunar +description: + +css: scss/case-detail.scss + +section1: + title: 去哪儿网(Qunar.com) + content: 去哪儿网(Qunar.com)是中国领先的在线旅游平台,创立于 2005 年 5 月,总部位于北京。 + +section2: + listLeft: + - title: 公司简介 + contentList: + - content: 去哪儿网(Qunar.com)是中国领先的在线旅游平台,创立于 2005 年 5 月,总部位于北京。去哪儿网通过网站及移动客户端的全平台覆盖,以自有技术为驱动,随时随地的为旅游服务供应商和旅行者提供专业的产品与服务。 + image: + + - title: 背景 + contentList: + - content: 近几年,云原生和容器技术非常火爆,且日趋成熟,众多企业慢慢开始容器化建设,并在云原生技术方向上不断的探索和实践。基于这个大的趋势, 2020 年底 Qunar 也向云原生迈出了第一步——容器化。 + - content: 云原生是一系列可以为业务赋能的技术架构准则,遵循它可以使应用具有扩展性、伸缩性、移植性、韧性等特点。云原生也是下一代技术栈的必选项,它可以让业务更敏捷。通过实践 DevOps、微服务、容器化、可观测性、反脆弱性(chaos engineering)、ServiceMesh、Serverless 等云原生技术栈,我们便可以享受到云原生带来的技术红利。 + image: + + - title: Qunar 容器化发展时间线 + contentList: + - content: 一项新技术要在企业内部落地从来都不是一蹴而就的,Qunar 的容器化落地也同样如此。Qunar 的容器后落地主要经历了 4 个时间节点: + - content: 2014 - 2015:业务线同学开始尝试通过 Docker、Docker-Compose 来解决联调环境搭建困难的问题,不过由于 Docker-Compose 的编排能力有限、无法解决真实的环境问题,因此容器化最后也没有推行起来。 + - content: 2015 - 2017:ops 团队把为了提高 ELK 集群的运维效率,把 ES 集群迁移到了 Mesos 平台上。后来随着 K8s 生态的成熟,把 ES 集群从 Mesos 迁移到了 K8s 平台,运维效率得到了进一步的提升。 + - content: 2018 - 2019:在业务需求不断增加的过程中,业务对测试环境的交付速度和质量有了更高的要求,为了解决 MySQL 的交付效率问题(并发量大时,网络 IO 成为了瓶颈,导致单个实例交付时长在分钟级),为了解这个问题,我们把 MySQL 容器化,通过 Docker on host 的模式可以在 10 秒之内就可以交付一个 MySQL 实例。 + - content: 2020 - 2021:云原生技术已经非常成熟了,Qunar 也决定通过拥抱云原生来为业务增加势能。在各个团队齐心协力的努力下,300+ 的 P1、P2 应用已经完成了容器化,并且计划在 2021 年年底全部业务应用实现容器化。 + image: + + - title: 落地过程与实践 + contentList: + - specialContent: + text: 容器化整体方案介绍 + level: 3 + - content: Qunar 在做容器化过程中,各个系统 Portal 平台、中间件、ops 基础设施、监控等都做了相应的适配改造,改造后的架构矩阵如下图所示。 + - content: Portal:Qunar 的 PaaS 平台入口,提供 CI/CD 能力、资源管理、自助运维、应用画像、应用授权(db 授权、支付授权、应用间授权)等功能。 + - content: 运维工具:提供应用的可观测性工具, 包括 watcher(监控和报警)、bistoury(Java 应用在线 Debug)、qtrace(tracing 
系统)、loki/elk(0.提供实时日志/离线日志查看)。 + - content: 中间件:应用用到的所有中间件,mq、配置中心、分布式调度系统 qschedule、dubbo 、mysql sdk 等。 + - content: 虚拟化集群:底层的 K8s 和 OpenStack 集群。 + - content: Noah:测试环境管理平台,支持应用 KVM/容器混合部署。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-arch.webp + - title: + contentList: + - specialContent: + text: CI/CD 流程改造 + level: 3 + - content: 主要改造点如下,下方图分别为改造前和改造后。 + - content: 1. 应用画像:把应用相关的运行时配置、白名单配置、发布参数等收敛到一起,为容器发布提供统一的声明式配置。 + - content: 2. 授权系统:应用所有的授权操作都通过一个入口进行,并实现自动化的授权。 + - content: 3. K8s 多集群方案:通过调研对比,KubeSphere 对运维优化、压测评估后也满足我们对性能的要求,最终我们选取了 KubeSphere 作为多集群方案。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-cicd-1.webp + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-cicd-2.webp + - title: + contentList: + - specialContent: + text: 中间件适配改造 + level: 3 + - content: 改造关注点:由于容器化后,IP 经常变化是常态,所以各个公共组件和中间件要适配和接受这种变化。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-gaizaodian.webp + - title: + contentList: + - specialContent: + text: 应用平滑迁移方案设计 + level: 3 + - content: 为了帮助业务快速平滑地迁移到容器,我们制定了一些规范和自动化测试验证等操作来实现这个目标。 + - content: 1. 容器化的前置条件:应用无状态、不存在 post_offline hook(服务下线后执行的脚本)、check_url 中不存在预热操作。 + - content: 2. 测试环境验证:自动升级 SDK、自动迁移。我们会在编译阶段帮助业务自动升级和更改 pom 文件来完成 SDK 的升级,并在测试环境部署和验证,如果升级失败会通知用户并提示。 + - content: 3. 线上验证:第一步线上发布,但不接线上流量,然后通过自动化测试验证,验证通过后接入线上流量。 + - content: 4. 线上 KVM 与容器混部署:保险起见,线上的容器和 KVM 会同时在线一段时间,等验证期过后再逐步下线 KVM。 + - content: 5. 线上全量发布:确认服务没问题后,下线 KVM。 + - content: 6. 观察:观察一段时间,如果没有问题则回收 KVM。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-qianyi.webp + + - type: 1 + contentList: + - content: 提升多集群管理便捷性 + - content: 提高运维人员效率 + - content: 保障业务数据安全性 + + - title: 容器化落地过程中碰到的问题 + contentList: + - specialContent: + text: 如何兼容过去 KVM 的使用方式,并支持 preStart、preOnline hook 自定义脚本? 
+ level: 3 + - content: KVM 场景中 hook 脚本使用场景介绍:preStart hook——用户在这个脚本中会自定义命令,比如环境准备;preOnline hook——用户会定义一些数据预热操作等,这个动作需要在应用 checkurl 通过并且接入流量前执行。 + - content: 问题点:K8s 原生只提供了 preStop、postStart 2 种 hook, 它们的执行时机没有满足上述 2 个 KVM 场景下业务用到的 hook。 + - content: 分析与解决过程: + - content: preStart hook:在 entrypoint 中注入 preStart hook 阶段,容器启动过程中发现有自定义的 preStart 脚本则执行该脚本,至于这个脚本的位置目前规范是定义在代码指定目录下。 + - content: preOnline hook:由于 preOnline 脚本执行时机是在应用 checkurl 通过后,而应用容器是单进程,所以在应用容器中执行这个是行不通的。而 postStart hook 的设计就是异步的,与应用容器的启动也是解耦的, 所以我们初步的方案选择了 postStart hook 做这个事情。实施方案是 postStart hook 执行后会不断轮询应用的健康状态,如果健康检测 checkurl 通过了, 则执行 preOnline 脚本。脚本成功后则进行上线操作, 即在应用目录下创建 healthcheck.html 文件,OpenResty 和中间件发现这个文件后就会把流量接入到这个实例中。 + - content: 按照上面的方案,Pod 的组成设计如下: + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-pod.webp + - title: + contentList: + - specialContent: + text: 发布过程读不到标准输入输出 + level: 3 + - content: 场景介绍:在容器发布过程中如果应用启动失败,我们通过 K8s API 是拿不到实时的标准输入输出流,只能等到发布设置的超时阈值,这个过程中发布人员心里是很焦急的,因为不确定发生了什么。如下图所示,部署过程中应用的更新工作流中什么都看不到。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-pipline.webp + - title: + contentList: + - content: 问题点:K8s API 为什么拿不到标准输入输出? 
+ - content: 分析与解决过程: + - content: 通过 kubectl logs 查看当时的 Pod 日志,什么都没有拿到,超时时间过后才拿到。说明问题不在程序本身,而是在 K8s 的机制上;查看 postStart Hook 的相关文档,有一段介绍提到了 postHook 如果执行时间长或者 hang 住,容器的状态也会 hang 住,不会进入 running 状态, 看到这条信息,大概猜测到罪魁祸首就是这个 postStart hook 了。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-hook.webp + - title: + contentList: + - content: 基于上面的猜测,把 postStart hook 去掉后测试,应用容器的标准输入可以实时拿到了。 + - content: 找到问题后,解决方法也就简单了,把 postStart hook 中实现的功能放到 Sidecar 中就可以解决。至于 Sidecar 如何在应用容器的目录中创建 healthcheck.html 文件,就需要用到共享卷了。新的方案设计如下: + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-sidecar.webp + - title: + contentList: + - content: 使用上述方案后,发布流程的标准输入输出、自定义 hook 脚本的输出、Pod 事件等都是实时可见的了, 发布过程更透明了。 + image: + - title: + contentList: + - specialContent: + text: 并发拉取镜像超时 + level: 3 + - content: 场景介绍:我们的应用是多机房多集群部署的,当一个应用的新版本发布时,由于应用的实例数较多,有 50+ 个并发从 harbor 拉取镜像时,其中一些任务收到了镜像拉取超时的报错信息,进而导致整个发布任务失败。超时时间是 kubelet 默认设置的 1 分钟。 + - content: 分析与解决: + - content: 通过排查最终确认是 harbor 在并发拉取镜像时存在性能问题,我们采取的优化方案是通用的 p2p 方案,DragonFly + Harbor。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-harbor.webp + - title: + contentList: + - specialContent: + text: 并发大时授权接口抗不住 + level: 3 + - content: 场景介绍:应用发布过程中调用授权接口失败,K8s 的自愈机制会不断重建容器并重新授权,并发量比较大,最终把授权服务拖垮。 + - content: 我们的容器授权方案如下: + - content: 1. Pod init 容器启动时进行调研授权接口进行授权操作,包括 ACL 和 mysql 的白名单。 + - content: 2. 
容器销毁时会执行 Sidecar 容器的 preStop hook 中执行权限回收操作。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-authorization.webp + - title: + contentList: + - content: 问题点:ACL 授权接口涉及到了防火墙,QPS 比较低,大量容器进行 ACL 授权时把服务拖垮。 + - content: 分析与解决过程: + - content: 为了解决上述的问题,限量和降低授权接口调用次数是有效的解决方式。我们采取了下面几个措施:init 容器中的重试次数限制为 1 次;授权接口按应用和 IP 限流, 超过 3 次则直接返回失败,不会再进行授权操作;ACL 中涉及的一些通用的端口,我们统一做了白名单,应用无需再进行授权操作。 + image: + - title: + contentList: + - specialContent: + text: Java 应用在容器场景下如何支持远程 Debug + level: 3 + - content: KVM 场景 Debug 介绍:在开发 Java 应用的过程中,通过远程 Debug 可以快速排查定位问题,因此是开发人员必不可少的一个功能。Debug 具体流程:开发人员在 Noah 环境管理平台的界面点击开启 Debug, Noah 会自动为该 Java 应用配置上 Debug 选项,-Xdebug -Xrunjdwp:transport=dt_socket, server=y, suspend=n, address=127.0.0.1:50005,并重启该 Java 应用,之后开发人员就可以在 IDE 中配置远程 Debug 并进入调试模式了。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-debug.webp + - title: + contentList: + - content: 容器场景的 Debug 方案:测试环境的 Java 应用默认开启 Debug 模式,这样也避免了更改 Debug 重建 Pod 的过程,速度从 KVM 的分钟级到现在的秒级。当用户想开启 Debug 时,Noah 会调用 K8s exec 接口执行 socat 相关命令进行端口映射转发,让开发人员可以通过 socat 开的代理连接到 Java 应用的 Debug 端口。 + - content: 问题点:容器场景下在用户 Debug 过程中,当请求走到了设置的断点后,Debug 功能失效。 + - content: 分析与解决过程: + - content: 1. 复现容器场景下 Debug,观察该 Pod 的各项指标,发现 Debug 功能失效的时候系统收到了一个 liveness probe failed,kill pod 的事件。根据这个事件可以判断出当时 liveness check 失败,应用容器才被 kill 的,应用容器重启代理进程也就随之消失了,Debug 也就失效了。 + - content: 2. 关于 Debug 过程 checkurl 为什么失败的问题,得到的答案是 Debug 时当请求走到断点时,整个 JVM 是 hang 住的,这个时候任何请求过来也会被 hang 住,当然也包括 checkurl,于是我们也特地在 KVM 场景和容器场景分布做了测试,结果也确实是这样的。 + - content: 3. 临时解决方案是把断点的阻断级别改为线程级的,这样就不会阻断 checkurl 了, idea 中默认的选项是 Suspend All,改为 Suspend Thread 即可。不过这个也不是最优解,因为这个需要用户手工配置阻断级别,有认知学习成本。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-idea.webp + - title: + contentList: + - content: 4. 回到最初的问题上,为什么容器场景下遇到这个问题,而 KVM 没有,主要是因为容器场景 K8s 提供了自愈能力,K8s 会定时执行 liveness check, 当失败次数达到指定的阈值时,K8s 会 kill 掉容器并重新拉起一个新的容器。 + - content: 5. 
那我们只好从 K8s 的 liveness 探针上着手了,探针默认支持 exec、tcp 、httpGet 3 种模式,当前使用的是 httpGet,这种方式只支持一个 url, 无法满足这个场景需求。经过组内讨论, 最后大家决定用这个表达式 (checkurl == 200) || (socat process && java process alive) 在作为应用的 liveness 检测方式,当 Debug 走到断点的时候, 应用容器就不会阻断了, 完美的解决了这个问题。 + - content: 以上就是我们落地容器化过程中遇到的几个问题与我们的解决思路。其中很重要的一点是从 KVM 迁移到容器时需要考虑用户的使用习惯、历史功能兼容等要点,要做好兼容和取舍,只有这样容器化落地才会更顺畅。 + image: + + - type: 2 + content: '使用 KubeSphere 作为多 K8s 集群管理平台,大大提高了运维同学的工作效率,同时作为统一的集群入口,它也保障了业务数据的安全。' + author: '去哪儿网' + + - title: 未来展望 + contentList: + - content: 多集群稳定性治理:让可观测性数据更全面、覆盖度更广,进而完善我们的 APM 系统,提升排查问题效率;通过实施混沌工程来验证、发现和消除容器化场景的稳定性盲区。 + - content: 提高资源利用率:根据业务指标实现弹性扩缩容;根据应用的历史数据智能的调整 requests。 + - content: ServiceMesh 方案落地:我们是基于 Istio 和 MOSN 以及当前的基础架构做的 mesh 方案,目前在测试阶段,这套方案落地后相信会让基础架构更敏捷。 + image: + + rightPart: + icon: /images/case/logo-qunar.png + list: + - title: 行业 + content: 旅游业 + - title: 地点 + content: 中国 + - title: 云类型 + content: 私有云 + - title: 挑战 + content: 多集群管理 + - title: 采用功能 + content: 多集群管理, 事件管理 + +--- diff --git a/content/zh/case/segmentfault.md b/content/zh/case/segmentfault.md new file mode 100644 index 000000000..21d6f38ee --- /dev/null +++ b/content/zh/case/segmentfault.md @@ -0,0 +1,122 @@ +--- +title: SegmentFault +description: + +css: scss/case-detail.scss + +section1: + title: SegmentFault 思否 + content: SegmentFault 思否是国内领先的新一代开发者社区和技术媒体,也是中国最大的 Hackathon 组织者,目前已经覆盖和服务了上千万中国软件开发者和 IT 信息从业者。 + +section2: + listLeft: + - title: 公司简介 + contentList: + - content: SegmentFault(https://segmentfault.com/)是一家综合性技术社区,由于它的内容跟编程技术紧密相关,因此访问量的波动也和这一群体的作息时间深度绑定。通常情况下 web 页面的请求量峰值在 800 QPS 左右,但我们还做了前后端分离,所以 API 网关的峰值 QPS 是请求量峰值的好几倍。 + image: + + - title: 架构历史 + contentList: + - content: 2012 年,为了帮助中文开发者用母语在像 StackOverflow 这样的网站上提问,SegmentFault 诞生。但是第一个版本非常简陋,访问量很少。将它放在了国外的 VPS 托管商 Linode 上,所有的应用、数据库、缓存都挤在一个实例上。 + - content: 2013-2014 年,我们选择了自己购买二手服务器去机房托管,经常遇到问题,我们团队在外地又去不了机房,只能等管理员去机房帮我们解决。正好在 2014 年我们的网站被 DDos 攻击了,机房为了不连累其他服务器,直接把我们的网线拔掉了。 + - content: 2014-2019 
年,中国的云计算也开始起步了,于是我们把整个网站从物理服务器迁移到了云服务上。当然使用上并没有什么不同,只是把物理机器替换成了虚拟主机。 + - content: 2020 年至今,随着云原生理念的兴起,我们的业务模式也发生了很大变化,为了让系统架构适应这些变化,我们把网站的主要业务都迁移到了 KubeSphere 上。 + image: + + - title: 遇到的挑战 + contentList: + - content: 我们遇到了不少挑战,促使我们不得不往 K8s 架构上迁移。 + - content: 其次,复杂的场景引发了复杂的配置管理,不同的业务要用到不同的服务,不同的版本,即使用自动化脚本效率也不高。 + - content: 另外,我们内部人员不足,所以没有专职运维,现在 OPS 的工作是由后端开发人员轮值的。但后端开发人员还有自己本职工作要做,所以对我们最理想的场景是能把运维工作全部自动化。 + - content: 最后也是最重要的一点就是我们要控制成本。当然,如果资金充足,以上的问题都不是问题,但是对于创业公司(特别是像我们这种访问量比较大,但是又不像电商,金融那些盈利的公司)来说,我们必将处于且长期处于这个阶段。因此能否控制好成本,是一个非常重要的问题。 + image: + + - title: 前后端分离 + contentList: + - content: 2020 年以前,SegmentFault 的网站还是非常传统的后端渲染页面的方法,所以服务端的架构也非常简单。服务端将浏览器的 http 请求转发到后端的 php 服务,php 服务渲染好页面后再返回给浏览器。这种架构用原有的部署方法还能支撑,也就是在各个实例上部署 php 服务,再加一层负载均衡就基本满足需求了。 + image: https://pek3b.qingstor.com/kubesphere-community/images/1694153167.png + - title: + contentList: + - content: 然而随着业务的持续发展,后端渲染的方式已经不适合我们的项目规模了,因此我们在 2020 年做了架构调整,准备将前后端分离。前后端分离的技术特点我在这里就不赘述了,这里主要讲它给我们带来了哪些系统架构上的挑战。一个是入口增多,因为前后端分离不仅涉及到客户端渲染(CSR),还涉及到服务端渲染(SSR),所以响应请求的服务就从单一的服务变成了两类服务,一类是基于 node.js 的 react server 服务(用来做服务端渲染),另一类是 基于 php 写的 API 服务(用来给客户端渲染提供数据)。而服务端渲染本身还要调用 API,而我们为了优化服务端渲染的连接和请求响应速度,还专门启用了使用专有通讯协议的内部 API 服务。 + image: https://pek3b.qingstor.com/kubesphere-community/images/2947375573.png + - title: + contentList: + - content: 所以实际上我们的 WEB SERVER 有三类服务,每种服务的环境各不相同,所需的资源不同,协议不同,各自之间可能还有相互连接的关系,还需要负载均衡来保障高可用。在快速迭代的开发节奏下,使用传统的系统架构很难再去适应这样的结构。 + - content: 我们迫切需要一种能够快速应用的,方便部署各种异构服务的成熟解决方案。 + image: + + - title: KubeSphere 带来了什么?
+ contentList: + - specialContent: + text: 开箱即用 + level: 3 + - content: 首先是开箱即用,理论上来说这应该是 KubeSphere 的优点,我们直接点一点鼠标就可以打造一个高可用的 K8s 集群。这一点对我们这种没有专职运维的中小团队来说很重要。根据我的亲身经历,要从零开始搭建一个高可用的 K8s 集群还是有点门槛的,没有接触过这方面的运维人员,一时半会是搞不定的,其中的坑也非常多。 + - content: 如果云厂商能提供这种服务是最好的,我们不用在服务搭建与系统优化上花费太多时间,可以把更多的精力放到业务上去。之前我们还自己搭建数据库,缓存,搜索集群,后来全部都使用云服务了。这也让我们的观念有了转变,云时代的基础服务,应该把它视为基础设施的一部分加以利用。 + image: https://pek3b.qingstor.com/kubesphere-community/images/41259074.png + - title: + contentList: + - specialContent: + text: 用代码管理部署 + level: 3 + - content: 如果能把运维工作全部用代码来管理,那就再理想不过了。而目前 K8s 确实给我们提供了这样一个能力,现在我们每个项目都有一个 Docker 目录,里面放置了不同环境下的 Dockerfile,K8s 配置文件等等。不同的项目,不同的环境,不同的部署,一切都可以在代码中描述出来加以管理。 + - content: 比如我们之前提到的同样的 API 服务,使用两种协议,变成了两个服务。在这现在的架构下,就可以实现后端代码一次书写,分开部署。其实这些文件就代替了很多部署操作,我们需要做的只是定义好以后执行命令把它们推送到集群。 + - content: 而一旦将这些运维工作代码化以后,我们就可以利用现有的代码管理工具,像写代码一样来调整线上服务。更关键的一点是,代码化之后无形中又增加了版本管理功能,这离我们理想中的全自动化运维又更近了一步。 + image: + - title: + contentList: + - specialContent: + text: 持续集成,快速迭代 + level: 3 + - content: 持续集成标准化了代码发布流程,如果能将持续集成和 K8s 的部署能力结合起来,无疑能大大加快项目迭代速度。而在使用 K8s 之前我们就一直用 GitLab 作为版本管理工具,它的持续集成功能对我们来说也比较适用。在做了一些脚本改造之后,我们发现它也能很好地服务于现有的 K8s 架构,所以也没有使用 K8s 上诸如 Jenkins 这样的服务来做持续集成。 + - content: 步骤其实也很简单,做好安全配置就没什么问题。我们本地跑完单元测试之后,会自动上线到本地的测试环境。在代码合并到上线分支后,由管理员点击确认进行上线步骤。然后在本地 build 一个镜像推送到镜像服务器,通知 K8s 集群去拉取这个镜像执行上线,最后执行一个脚本来检查上线结果。整个流程都是可视化可追踪的,而且在代码管理界面就可以完成,方便开发者查看上线进度。 + image: https://pek3b.qingstor.com/kubesphere-community/images/qunar-gaizaodian.webp + + - type: 1 + contentList: + - content: 开箱即用的特性大大降低了企业容器化的使用门槛 + - content: 更加精细地控制服务资源粒度,降低企业运营成本 + - content: 实现全自动化测试部署,大大提高了发布效率 + + - title: 总结经验 + contentList: + - specialContent: + text: 管理好基础镜像 + level: 3 + - content: 目前我们用一个专门的仓库来管理这些基础镜像,这可以使开发人员拥有与线上一致的开发环境,而且后续的版本升级也可以在基础镜像中统一完成。 + - content: 除了将 Dockerfile 文件统一管理以外,我们还将镜像 build 服务与持续集成结合起来。每个 Dockerfile 文件都有一个所属的 VERSION 文件,每次修改里面的版本号并提交,系统都会自动 build 一个相应的镜像并推送到仓库。基础镜像的管理工作完全自动化了,大大减少了人为操作带来的错误与混乱。 + image: + - title: + contentList: + - specialContent: + text: 
KubeSphere 使用 + level: 3 + - content: 别把日志服务放到集群里。这一点在 KubeSphere 文档中就有提及。具体到日志服务,主要就是一个 Elastic 搜索服务,自建一个Elastic 集群即可。因为日志服务本身负载比较大,而且对硬盘的持续性需求高,如果你会发现日志服务本身就占据了集群里相当大的资源,就得不偿失了。 + - content: 如果生产环境要保证高可用,还是要部署 3 个或以上的节点。从我们使用的经验来看,主节点偶尔会出现问题。特别是遇到节点机器要维护或者升级的时候,多个主节点可以保证业务的正常运行。 + - content: 如果你本身不是专门提供数据库或缓存的服务商,这类高可用服务就不要上K8s,因为要保证这类服务高可用本身就要耗费你大量的精力。我建议还是尽量用云厂商的服务。 + - content: 副本的规模和集群的规模要匹配。如果你的容器只有几个节点,但一个服务里面扩展了上百个副本,系统的调度会过于频繁从而把资源耗尽。所以这两者要相匹配,在系统设计的时候就要考虑到。 + image: + + - type: 2 + content: 'KubeSphere 的应用改变了我们的整个开发流程,它让运维工作可视化,代码化,标准化。从而使得我们的开发人员可以参与到整个交付流程中去,大大提升了产品发布效率。而得益于 KubeSphere 良好的交互设计,我们还能快速定位和处理线上事故,提高了系统运行的稳定性。' + author: 'SegmentFault' + + - title: 未来展望 + contentList: + - content: 随着后端的不断微服务化,我们将越来越依赖 K8s 提供的低成本系统管理能力,而在这之中 KubeSphere 也扮演了越来越重要的角色。当前系统架构的趋势是自动化和标准化,管理系统的手段也将从配置变为代码,这大大增加系统的灵活性和可维护性,而复杂性也随之提升。软件决定了我们看待计算能力的方式,KubeSphere 这类软件带来的新的可能将促使我们不断探索系统架构的新思路。 + image: + + rightPart: + icon: /images/case/segmentfault-logo.png + list: + - title: 行业 + content: 技术社区 + - title: 地点 + content: 中国 + - title: 云类型 + content: 公有云 + - title: 挑战 + content: 人员、运营成本、自动化脚本效率低 + - title: 采用功能 + content: DevOps、CI/CD + +--- diff --git a/content/zh/case/uisee.md b/content/zh/case/uisee.md new file mode 100644 index 000000000..366e1722c --- /dev/null +++ b/content/zh/case/uisee.md @@ -0,0 +1,162 @@ +--- +title: uisee +description: + +css: scss/case-detail.scss + +section1: + title: 驭势科技 + content: 驭势科技UISEE 是中国领先的自动驾驶公司,致力于为全行业、全场景提供 AI 驾驶服务,交付赋能出行和物流新生态的AI驾驶员。 + +section2: + listLeft: + - title: 公司简介 + contentList: + - content: 驭势科技成立于 2016 年 2 月,坚持立足本土研发,根植中国市场。总部和研发中心设立在北京,在上海嘉定和浙江嘉善分别设有研发中心、研发试制和应用创新中心。此外,在深圳、广西、成都、武汉等地均设有业务分支机构。 + - content: 驭势科技依托自主研发的 U-Drive 智能驾驶平台,在业务上已经形成可规模化部署的 L3-L4 级智能驾驶系统,可以满足多场景、高级别的自动驾驶需求。2019 年,驭势科技率先在机场和厂区实现了“去安全员”无人驾驶常态化运营的重大突破,落地“全场景、真无人、全天候”的自动驾驶技术,并由此迈向大规模商用。 + image: https://pek3b.qingstor.com/kubesphere-community/images/20210927-175846.jpeg + + - title: 行业背景 + contentList: + - content: 
驭势科技(UISEE)是国内领先的自动驾驶公司,致力于为全行业、全场景提供 AI 驾驶服务,交付赋能出行和物流新生态的 AI 驾驶员。由于需要保障各个场景下 “真 · 无人”(即无安全员在车上或跟车)的业务运作,我们比较注重在 “云端大脑” 上做了一些保障其高可用和可观察性方面的实践。 + - content: 让我们假设有这样一个场景:在一个厂区运行了几十台的无人物流拖车,考虑到 “真无人” 环境下的安全运营,我们会采取车云连接长时间断开(一般为秒级)即停车的策略;如果在运营的过程中,云端出现故障且缺乏高可用能力,这将造成所有车辆停运。显然这对业务运营会造成巨大影响,因此云平台的稳定性和高可用性是非常重要和关键的。 + image: + + - title: 为什么选择 KubeSphere + contentList: + - content: 我们和 KubeSphere 的结缘可以说是 “始于颜值,陷于才华”。早在 KubeSphere v2.0 发布之时,我们因缘际会在社区的新闻中留意到这个产品,并马上被它 “小清新” 的界面所吸引。于是,从 v2.0 开始我们便开始在私有云上进行小范围试用,并在 v2.1 发布之后开始投入到我们公有云环境的管理中使用。 + - content: KubeSphere v3.0 是一个非常重要的里程碑发布,它带来了 Kubernetes 多集群管理的能力、进一步增强了在监控和告警方面的能力,并在 v3.1 中持续对这些能力进行夯实。由此,我们也开始更大范围地将 KubeSphere 应用到我们自有的和客户托管的集群(及在其中运行的工作负载)的管理上,同时我们也在进一步探索如何将现有的 DevOps 环境和 KubeSphere 做整合,最终的目标还是希望将 KubeSphere 打造成我们内部面向云原生各应用、服务、平台的统一入口和集中管理的核心。 + - content: 正是由于 KubeSphere 提供了这样优秀的管控能力,使得我们有了更多时间从业务角度去提升云平台的可用性。这次分享的两个内容就是我们早期和现在正在推进的两项可用性相关的实践。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-1.png + + - type: 1 + contentList: + - content: 提升多集群管理便捷性 + - content: 降低监控告警维护成本 + - content: 对于入门 K8s 非常友好 + + - title: “高可用”实践:提供热备能力的 Operator + contentList: + - content: “高可用” 方面,我们期望解决的问题是如何确保云端服务出现故障时可以用最快的速度重新恢复到稳定运行的状态。 + - specialContent: + text: 限定区域 L4 无人驾驶场景的 “高可用” 诉求 + level: 3 + - content: “高可用” 从时间量化的角度通常就是几个 9 级别选择,但落到具体的业务场景,所面临的问题和挑战却是各不相同的。如上图所列举的,对于我们 “限定区域 L4 无人驾驶场景” 而言,以 toB 业务为主所造成的客户私有云种类繁多、对于恢复过程容忍度不同、以及客户定制服务产生的历史包袱较多是制约我们构建高可用方案的几个主要问题。面对这些限制,我们选择了一个比较 “简单粗暴” 的思路,试图 “化繁为简” 跳出跨云高可用成本高、为服务附加高可用能力融合风险高的常见问题包围圈。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-2.png + - title: + contentList: + - specialContent: + text: 一种通过 Operator 实热备切换的高可用方法 + level: 3 + - content: 如下图所示,这个方案的思路很直接 —— 实现服务 Pod 状态监测并在状态异常时进行主备 Pod 切换。如果我们从 Controller 的 “Observe - Analyze - Act” 体系来看,它做了如下工作: + - content: 1. 监测。同时能够监测 Pod / Deployment / StatefulSet / Service 的变化(包括能够监控特定的 Namespace);监测到有变化则触发 Reconcile 调协过程(即以下两个操作)。 + - content: 2. 
判断。遍历所有 Service,获取 Deployment / StatefulSet,将其 status 中的服务的总数量与可用数量进行比较;如果有副本不可用,则再遍历 dp/sts 里面的 Pod,通过容器的状态及重启次数来找到不健康的 Pod,当一个 dp/sts 下所有的 Pod 都不健康,则认为这个服务整体不健康。 + - content: 3. 切换。同一个服务部署主备两套 dp/sts,在当前服务的 dp/sts 指向的 Pod 全都不健康时(即整个服务不健康),若另一套 dp/sts 健康,切换至另一套 dp/sts。 + - content: 在这个 Operator 的开发框架上,我们选用了 Kubernetes 官方社区的 Operator SDK,即 Kubebuilder。从技术上看,它对于编写 Controller 通常需要的核心组件 client-go 有比较好的封装,可以帮助开发者更专注于业务逻辑的开发;从社区的支持角度看,发展也比较平稳。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-3.png + - title: + contentList: + - specialContent: + text: 行百里者半九十:高可用功能落地的长尾在于测试 + level: 3 + - content: 由于 “高可用” 功能的特殊性,它的测试尤其重要,但常规的测试手段可能并不是很适用(这里存在一个有意思的 “悖论”:测试是为了发现问题,而高可用的启用会避免发生问题)。所以我们在完成这个 Operator 的开发后,其实更多的时间是花在测试方面,在这里我们主要实施了三个方面的测试工作: + - content: 1. 端到端的 BDD 测试。这块作为基础功能验证和测试,我们使用了支持 Cucumber BDD 测试框架的 Godog 项目(支持 Cucmber Gherkin 语法),BDD 也适合业务方直接导入需求。 + - content: 2. 针对运行环境的混沌测试。这块我们使用 ChaosBlade 对 Kubernetes 物理节点的系统运行环境进行相关混沌测试,以检验出现基建故障时的高可用表现。 + - content: 3. 针对业务层面的混沌测试。这里我们使用 Chaos Mesh 对主备服务进行 Pod 级别的测试,使用 Chaos Mesh 一方面是由于它在这个层面功能覆盖比较全面,另一方面也是因为它的 Dashboard 便于管理测试所用到的各项测试用例。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-5.png + + - title: “可观察”实践:车云端到端 SkyWalking 接入 + contentList: + - content: “可观察” 方面,我们期望解决的问题是在服务故障恢复后,如何确保我们能够尽可能快的定位到问题的根源,以便尽早真正消除问题隐患。 + - specialContent: + text: 无人驾驶车云一体化架构下的 “可观察” 诉求 + level: 3 + - content: “车云一体化” 架构是无人驾驶的一个重要核心,从云端视角来看,它的一个巨大挑战就是业务链路非常长,远长于传统的互联网纯云端的业务链路。这个超长链路上任意一点的问题都有可能引发故障,轻则告警、重则导致车辆异常离线,所以对于链路上的点点滴滴、各式各样的信息我们总是希望能够应收尽收,以便于定位问题。同时,纯粹的日志类数据也是不够的,因为链路太长且又分布在车云的不同地方,单靠日志不便于快速定位问题发生的区间从而进行有针对性的问题挖掘。 + - content: 为了便于大家更具象的了解链路之长,下图我们给出了一个抽象的 “车云一体化” 架构图,感兴趣的朋友可以数一下这 “7 x 2” 的调用链路。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-7.png + - title: + contentList: + - specialContent: + text: 通过 SkyWalking 实现车云全链路追踪 + level: 3 + - content: Apache SkyWalking 是社区中一个优秀而且活跃的可观察性平台项目,它同时提供了 Logging、Metrics、Tracing 
可观察性三元组的功能,其中尤以追踪能力最为扎实。为了方便大家对后续内容有更好的把握,这边也简单整理几个关键点供大家参考: + - content: Trace:一个 Trace 代表一个潜在的,分布式的,存在并行数据或并行执行轨迹(潜在的分布式、并行)的系统。一个 Trace 可以认为是多个 Span 的有向无环图(DAG)。 + - content: Span:在服务中埋点时,最需要关注的内容。一个 Span 代表系统中具有开始时间和执行时长的逻辑运行单元。Span 之间通过嵌套或者顺序排列建立逻辑因果关系。在 SkyWalking 中,Span 被区分为:LocalSpan—服务内部调用方法时创建的 Span 类型;EntrySpan—请求进入服务时会创建的 Span 类型(例如处理其他服务对于本服务接口的调用);ExitSpan—请求离开服务时会创建的 Span 类型(例如调用其他服务的接口)。SkyWalking 中,创建一个 ExitSpan 就相当于创建了一个 Parent Span,以 HTTP 请求为例,此时需要将 ExitSpan 的上下文编码后,放到请求的 Header 中;在另一个服务接收到请求后,需要创建一个 EntrySpan,并从 Header 中解码上下文信息,以解析出它的 Parent 是什么。通过这样的方式,ExitSpan 和 EntrySpan 就可以串联在一起。SkyWalking 中未对 ChildOf 和 FollowsFrom 两种类型的 Span 作区分。 + - content: TraceSegment:SkyWalking 中的概念,介于 Trace 和 Span 之间,是一条 Trace 的一段,可以包含多个 Span。一个 TraceSegment 记录了一个线程中的执行过程,一个 Trace 由一个或多个 TraceSegment 组成,一个 TraceSegment 又由一个或多个 Span 组成。 + - content: SpanContext:代表跨越进程上下文,传递到下级 Span 的状态。在 Go 中,通过 context.Context 在同一个服务中进行传递。 + - content: Baggage:存储在 SpanContext 中的一个键值对集合。它会在一条追踪链路上的所有 Span 内全局传输,包含这些 Span 对应的 SpanContext。Baggage 会随着 Trace 一同传播。SkyWalking 中,上下文数据通过名为 sw8 的头部项进行传递,值中包含 8 个字段,由 - 进行分割(包括 Trace ID,Parent Span ID 等等)。另外 SkyWalking 中还提供名为 sw8-correlation 的扩展头部项,可以传递一些自定义的信息。 + - content: 与 Jaeger / Zipkin 相比,虽然都是对 OpenTracing 的实现,但是 ExitSpan、EntrySpan 的概念是在 SkyWalking 中独有的,使用下来体验较好的点在于: 使用语义化的 ExitSpan 和 EntrySpan,使代码逻辑更为清晰;希望逻辑清晰的原因是,有时候创建 Span 确实容易出错,尤其是在对服务链路不熟悉的情况下。所以进行埋点时,对 OpenTracing 的理解是基础,也需要了解服务的链路。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-8.png + - title: + contentList: + - content: SkyWalking 的插件体系是保障我们得以在一个庞大的微服务架构中进行埋点的基础,官方为 Java、Python、Go、 Node.js 等语言都提供了插件,对 HTTP 框架、SQL、NoSQL、MQ、RPC 等都有插件支持(Java 的插件最为丰富,有 50+,其他语言的插件可能没有这么全面 )。我们基于 Go 和 Python 官方插件的研发思路,又进一步扩展和自制了一些插件,例如: + - content: Go · GORM:GORM 支持为数据库操作注册插件,只需在插件中创建 ExitSpan。 + - content: Go · gRPC:利用 gRPC 拦截器,在 metadata(类似 HTTP 的 Header) 中写入上下文。 + - content: Go · MQTT:没有找到可以使用的中间件,所以直接写了函数,在发布和收到消息时手动调用。 + - content: Python · MQTT:在 Payload 中写入 
Carrier(可参考 OpenTracing 中 Baggage 的概念,携带包含 Trace 上下文信息的键值对) 中的上下文数据。 + - content: Python · Socket:由于比较底层,按照官方做法自定义 Socket 插件后,HTTP 请求、MQTT 收发消息都会被记录,输出信息过多;所以又自定义了两个函数结合业务手动调用。 + image: + + - title: 前途是光明的,道路是曲折的 —— 记一些我们踩过的坑 + contentList: + - content: 由于微服务架构中涉及的语言环境、中间件种类以及业务诉求通常都比较丰富,这导致在接入全链路追踪的过程中难免遇到各种主观和客观的坑,这里给大家介绍几个常见场景。 + - specialContent: + text: 案例一:Kong 网关的插件链路接入问题 + level: 3 + - content: 找不到官方插件是最常见的一种接入问题。比如我们在接入 SkyWalking 时,官方还未发布 SkyWalking 的 Kong 插件(5 月才发布)。我们因为业务需要在 Kong 中接入了自定义的一个权限插件,用于对 API 和资源的授权;这个插件会调用权限服务的接口进行授权。那么这个插件中的调用,也应属于调用链中的一环,所以我们的解决思路是直接在权限插件中进行了埋点,具体形成的链路如下图所示。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-9.png + - title: + contentList: + - specialContent: + text: 案例二:跨线程/跨进程的链路接入问题 + level: 3 + - content: 对于跨线程,这里给大家一个提示:可以使用函数 capture() 以及 continued();使用 Snapshot,为 Context 上下文创建快照。对于跨进程,我们遇到一个比较坑的坑是 Python 版本问题:Python 服务中新起一个进程后,原先进程的 SkyWalking Agent 在新进程中无法被使用;需要重新启动一个 Agent 才能正常使用,实践后发现 Python 3.9 可行,Python 3.5 中则会报错 “agent can only be started once”。 + image: + - title: + contentList: + - specialContent: + text: 案例三:官方 Python Redis 插件 Pub/Sub 断路问题 + level: 3 + - content: 这个案例是一个典型的官方插件不能覆盖现实业务场景的问题。官方提供的 Python 库中,有提供 Redis 插件;一开始我们认为安装了 Redis 插件,对于一切 Redis 操作,都能互相连接;但是实际上,对于 Pub/Sub 操作,链路是会断开的。 + - content: 查看代码后发现,对于所有的 Redis 操作,插件都创建一个 ExitSpan。但是在我们的场景中,需要进行 Pub/Sub 操作;这导致两个操作都会创建 ExitSpan,而使链路无法相连。对于这种情况,最后我们通过改造了一下插件来解决问题,大家如果遇到类似情况也需要注意官方插件的功能定位。 + image: + - title: + contentList: + - specialContent: + text: 案例四:MQTT Broker 的多种 Data Bridge 接入问题 + level: 3 + - content: 一般来说,对 MQTT Broker 的追踪链路是 Publisher => Subscriber;但是也存在场景,MQTT Broker 接收到消息后,通过规则引擎调用通知中心的接口;而规则引擎调用接口时,没有办法把 Trace 信息放到 Header 中。 + - content: 这是一个典型的中间件高级能力未被插件覆盖的问题。通常这种情况还是得就坡下驴,按实际情况做定制。比如这个案例中我们通过约定好参数名称,放到请求体中,在通知中心收到请求后,从请求体中抽取链路的 Context 的方式最终实现了下图的链路贯通。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-10.png + + - title: + contentList: + - content: 最后也总结一下我们在 “可观察” 
这部分的一些实践体会:首先,还是需要依托一个成熟的持续演进的工具/平台;其次,就是依靠它,同时也要和它一起不断成长、不断自我完善;最后,断路不可怕,伟人说过 “星星之火,可以燎原”,明确目标、坚持努力,一定有机会解决问题的。 + image: + + - type: 2 + content: 'KubeSphere 帮助我们快速建立起多集群多租户的 K8s 管理系统,使我们可以高效的管理不同云上的多套生产环境,同时及时掌握其中工作负载的运行状态和性能指标。' + author: '驭势科技' + + - title: 未来展望 + contentList: + - content: 回首我们这一路和 KubeSphere 携手走来的历程,KubeSphere 已然成为我们日常运维中必不可少的一部分。再次感谢 KubeSphere 团队为中国乃至全球的开源社区贡献了这么一个卓越的云原生产品,我们也希望能尽自己所能多参与社区建设,与 KubeSphere 社区共成长! + - content: 我们同时也期盼着 KubeSphere 在后续的版本中提升开放接入外部系统的能力:这样一方面我们可以把内部的一些运维管理系统桥接、挂钩进 KubeSphere 形成统一的一站式的内部运维门户;另一方面也期待更多国内优秀的社区产品和 KubeSphere 形成合力更广泛的服务国内外广大的云原生社区用户。 + - content: 总之,期待基于 KubeSphere 这个优秀的平台底座,不断建设和打磨 DevOps 闭环,把云原生技术对于云端业务开发的效能提升作用推向极致。 + image: https://pek3b.qingstor.com/kubesphere-community/images/haili-cic-11.png + + rightPart: + icon: /images/case/uisee.png + list: + - title: 行业 + content: 自动驾驶 + - title: 地点 + content: 北京、上海、浙江、深圳 + - title: 云类型 + content: 公有云、私有云 + - title: 挑战 + content: 高可用、多集群管理、监控及告警 + - title: 采用功能 + content: 多集群及多租户管理、监控及告警 + +--- diff --git a/content/zh/common/kubernetes-versions.md b/content/zh/common/kubernetes-versions.md index 846d3bdd2..3cf40149b 100644 --- a/content/zh/common/kubernetes-versions.md +++ b/content/zh/common/kubernetes-versions.md @@ -5,5 +5,5 @@ _build: | 安装工具 | KubeSphere 版本 | 支持的 Kubernetes 版本 | | ------------ | --------------- | ------------------------------------------------------------ | -| KubeKey | v3.1.1 | v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9, v1.18.3, v1.18.5, v1.18.6, v1.18.8, v1.19.0, v1.19.8, v1.19.9, v1.20.4, v1.20.6 | -| ks-installer | v3.1.1 | v1.17.x, v1.18.x, v1.19.x, v1.20.x | +| KubeKey | 3.2.0 | v1.19.x, v1.20.x, v1.21.x, v1.22.x(实验性支持) | +| ks-installer | 3.2.0 | v1.19.x, v1.20.x, v1.21.x, v1.22.x(实验性支持) | diff --git a/content/zh/conferences/_index.md b/content/zh/conferences/_index.md index 99255fc68..22b24a67c 100644 --- a/content/zh/conferences/_index.md +++ b/content/zh/conferences/_index.md @@ -5,10 +5,59 @@ css: 
"scss/conferences.scss" viewDetail: 查看详情 list: - - name: KubeCon 大会 + - name: KubeCon 大会 2021 + content: KubeSphere 社区在 KubeCon + CloudNativeCon 2021 上的技术主题分享。 + icon: images/conferences/kubecon.svg + bg: images/conferences/kubecon-bg.svg + bgColor: linear-gradient(270deg, rgb(101, 193, 148), rgb(76, 169, 134)) + children: + - name: 基于 RBAC 和 Kubefed 的 Kubernetes 多集群和多租户管理 + summary: 软隔离是一种没有严格隔离不同用户、工作负载或应用程序的隔离形式。就 Kubernetes 而言,软隔离通常由 RBAC 和命名空间隔离。当集群管理员跨多个 Kubernetes 集群实现隔离时,会遇到许多挑战,如身份验证和授权、资源配额、网络策略、安全策略等。 + author: 万宏明 + link: rbac/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-hongming.png + + - name: 用统一的方式分发 Helm 和 Operator 应用 + summary: 许多应用程序定义和框架都来自云原生计算基金会领域,Helm 和 Operator 是 Kubernetes 生态系统中打包和管理应用程序的最流行方式。根据云原生计算基金会 2020 年的调查,以多集群和多云为代表的企业架构已成为现代基础设施的新趋势。如何利用以应用为中心的概念来提供自助服务,跨多个 Kubernetes 集群和云交付/部署应用程序? + author: 赖正一 + link: apps/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-zhengyi.png + + - name: 用云原生无服务器技术构建现代 FaaS(函数即服务)平台 + summary: 作为无服务器的核心,FaaS(函数即服务)越来越受到人们的关注。新兴的云原生无服务器技术可以通过用更强大的云原生替代方案替换 FaaS平台的关键组件,从而构建一个强大的现代 FaaS 平台。 + author: 霍秉杰,雷万钧 + link: openfunction/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-ben.png + + - name: 去哪儿网 Kubernetes 多集群和金丝雀部署最佳实践 + summary: 随着云原生时代的到来,学习和拥抱云原生不可避免,因为其可以使业务运营更加敏捷。容器化是将应用转移到 Kubernetes 之前的第一步。如何将数以千计的应用程序高效、顺畅地从基于内核的虚拟机 (KVM) 迁移到容器已成为去哪儿网基础设施团队面临的一个巨大挑战。 + author: 邹晟,陈靖贤 + link: qunar/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-qunar.png + + - name: KubeCon 大会 2020 + content: KubeSphere 团队在 KubeCon + CloudNativeCon 2020 上的技术主题分享。 + icon: images/conferences/kubecon.svg + bg: images/conferences/kubecon-bg.svg + bgColor: linear-gradient(270deg, rgb(101, 193, 148), rgb(76, 169, 134)) + children: + - name: 基于云原生系统通用模型的计量计费系统 + summary: 云原生服务比传统云服务更具弹性和可定制性。计算能力、存储和网络能力应该按需求实时分配,指标计量和计费参数复杂,产品定价策略将依靠许多参数不仅包括资源指标,和不同的资源提供者将使用不同的程序创建和释放资源,所以硬编码的计量/计费系统不能满足快速增长的需求。 + author: Anne 
Song,马丹 + link: metering/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2020-metering.png + + - name: 多租户环境中的 Kubernetes 事件导出、过滤和警报 + summary: K8s 事件管理的各个方面,包括事件导出、过滤、告警及通知;如何使用 Kube-Events Operator 管理事件导出、过滤和告警;如何处理多租户环境中的事件告警需求;如何使用 Alertmanager 像管理 Prometheus发出的告警一样,来管理 K8s 事件告警;如何使用 Notification Manager 在多租户环境下管理 Alertmanager 发出的通知。 + author: 霍秉杰,向军涛 + link: event/ + image: https://pek3b.qingstor.com/kubesphere-community/images/kubecon2020-event.jpg + + - name: KubeCon 大会 2019 content: KubeSphere 团队在 KubeCon + CloudNativeCon 2019 Shanghai 上的技术主题分享。 icon: images/conferences/kubecon.svg bg: images/conferences/kubecon-bg.svg + bgColor: linear-gradient(270deg, rgb(101, 193, 148), rgb(76, 169, 134)) children: - name: Porter-面向裸金属环境的 Kubernetes 开源负载均衡器 summary: 我们知道,在 Kubernetes 集群中可以使用 “LoadBalancer” 类型的服务将后端工作负载暴露在外部。云厂商通常为 Kubernetes 提供云上的 LB 插件,但这需要将集群部署在特定 IaaS 平台上。然而,许多企业用户通常都将 Kubernetes… @@ -27,11 +76,12 @@ list: author: 夏润泽 link: jenkins-x/ image: https://pek3b.qingstor.com/kubesphere-docs/png/20190930095450.png - + - name: QCon 全球软件开发大会 content: icon: images/conferences/qcon.svg bg: images/conferences/qcon-bg.svg + bgColor: linear-gradient(to left, rgb(52, 197, 209), rgb(95, 182, 216)) children: - name: 基于 CSI Kubernetes 存储插件的开发实践 summary: 现在很多用户都会将自己的应用迁移到 Kubernetes 容器平台中。在 Kubernetes 容器平台中,存储是支撑用户应用的基石。随着用户不断的将自己的应用深度部署在 K8S 容器平台中,但是我们现有的 Kubernetes… diff --git a/content/zh/conferences/apps.md b/content/zh/conferences/apps.md new file mode 100644 index 000000000..02bfe88d2 --- /dev/null +++ b/content/zh/conferences/apps.md @@ -0,0 +1,24 @@ +--- +title: '用统一的方式分发 Helm 和 Operator 应用' +author: '赖正一' +createTime: '2021-12-09' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-zhengyi.png' +--- + +## 议题简介 + +许多应用程序定义和框架都来自云原生计算基金会领域,Helm 和 Operator 是 Kubernetes 生态系统中打包和管理应用程序的最流行方式。根据云原生计算基金会 2020 年的调查,以多集群和多云为代表的企业架构已成为现代基础设施的新趋势。如何利用以应用为中心的概念来提供自助服务,跨多个 Kubernetes 集群和云交付/部署应用程序?KubeSphere 
团队正在构建一个统一的控制面,使用户能够以一致的工作流交付应用程序和云功能。在本次演讲中,KubeSphere 维护人员讨论:使用 CRD 简化 Helm Chart 和 Operator 部署,如何跨多个云传播原生应用程序,如何跨多个云管理 Operator 及其 CRD,如何在优雅的界面中扩展操作符。 + +## 分享者简介 + +赖正一,KubeSphere 研发工程师,为 Helm, virtualkubelet, grpc-gateway 等做出了贡献。他还在 KubeSphere 中维护应用程序商店、网络和可插拔架构。他的主要工作集中在网络、多集群、应用程序交付和云原生技术,如 Artifact Hub 和 Kube-OVN。 + +## 视频回放 + + + +## 对应文章 + +整理中,敬请期待 diff --git a/content/zh/conferences/event.md b/content/zh/conferences/event.md new file mode 100644 index 000000000..c86510bff --- /dev/null +++ b/content/zh/conferences/event.md @@ -0,0 +1,23 @@ +--- +title: '多租户环境中的 Kubernetes 事件导出、过滤和警报' +author: '霍秉杰,向军涛' +createTime: '2020-07-30' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubecon2020-event.jpg' +--- + +## 议题简介 + +K8s 事件管理的各个方面,包括事件导出、过滤、告警及通知;如何使用 Kube-Events Operator 管理事件导出、过滤和告警;如何处理多租户环境中的事件告警需求;如何使用 Alertmanager 像管理 Prometheus发出的告警一样,来管理 K8s 事件告警;如何使用 Notification Manager 在多租户环境下管理 Alertmanager 发出的通知。 + +## 分享者简介 + +霍秉杰,KubeSphere 可观测性负责人 + +向军涛,KubeSphere 研发工程师 + +## 视频回放 + + + diff --git a/content/zh/conferences/metering.md b/content/zh/conferences/metering.md new file mode 100644 index 000000000..9a8591cbc --- /dev/null +++ b/content/zh/conferences/metering.md @@ -0,0 +1,25 @@ +--- +title: '基于云原生系统通用模型的计量计费系统' +author: 'Anne Song,马丹' +createTime: '2020-07-31' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubecon2020-metering.png' +--- + +## 议题简介 + +云原生服务比传统云服务更具弹性和可定制性。计算能力、存储和网络能力应该按需求实时分配,指标计量和计费参数复杂,产品定价策略将依靠许多参数不仅包括资源指标,和不同的资源提供者将使用不同的程序创建和释放资源,所以硬编码的计量/计费系统不能满足快速增长的需求。 + +在原生云时代,定价的通用模型、计量和计费的标准API是一个强烈的需求。此外,计量/计费系统本身应该是实时的、健壮的、可扩展的和安全的。青云QingCloud 产品经理 Anne Song 和资深软件工程师马丹提出一个计量/计费模型来满足所有这些需求,并根据该模型实现了一个工作系统。 + +## 分享者简介 + +Anne Song,青云QingCloud 产品经理 + +马丹,资深软件工程师 + +## 视频回放 + + + diff --git a/content/zh/conferences/openfunction.md b/content/zh/conferences/openfunction.md new file mode 100644 index 000000000..32c39197b --- /dev/null +++ b/content/zh/conferences/openfunction.md @@ -0,0 +1,31 @@ 
+--- +title: '用云原生无服务器技术构建现代 FaaS(函数即服务)平台' +author: '霍秉杰,雷万钧' +createTime: '2021-12-10' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-ben.png' +--- + +## 议题简介 + +作为无服务器的核心,FaaS(函数即服务)越来越受到人们的关注。新兴的云原生无服务器技术可以通过用更强大的云原生替代方案替换 FaaS平台的关键组件,从而构建一个强大的现代 FaaS 平台。在本次分享中,OpenFunction 的维护人员分享讨论: + +- 构成 FaaS 平台的关键组成部分,包括函数框架、函数构建、函数服务以及函数事件管理。 +- 新兴云原生无服务器技术在 FaaS 各个关键领域中的优势,包括 Knative 服务、Cloud Native Buildpacks、Shipwright、Tekton、KEDA 和 Dapr。 +- 如何以 OpenFunction 为例,利用这些云原生技术构建强大的现代 FaaS 平台。 +- 事件管理对 FaaS 很重要的原因。既然已经有了 Knative eventing 和 Argo Events,为什么 OpenFunction 还要创建自己的事件管理系统“OpenFunction Events”? + +## 分享者简介 + +霍秉杰,云原生 FaaS 项目 OpenFunction Founder;FluentBit Operator 的发起人;他还是几个可观测性开源项目的发起人,如 Kube-Events、Notification Manager 等;热爱云原生和开源技术,是 Prometheus Operator, Thanos, Loki, Falco 的贡献者。 + +雷万钧,OpenFunction Maintainer,负责开发 OpenFunction;FluentBit Operator 的维护者;KubeSphere 可观测性团队的成员,负责 Notification Manager 的开发;云原生和开源技术的爱好者,fluent bit 和 nats 的贡献者。 + +## 视频回放 + + + +## 对应文章 + +整理中,敬请期待 \ No newline at end of file diff --git a/content/zh/conferences/qunar.md b/content/zh/conferences/qunar.md new file mode 100644 index 000000000..19435284d --- /dev/null +++ b/content/zh/conferences/qunar.md @@ -0,0 +1,33 @@
文化,调查、导入和开发流程、工具、平台的最佳实践,帮助公司以更快的速度交付软件,降低风险,降低运营成本。 + +邹晟,去哪儿网基础架构团队高级 DevOps 工程师,现主要负责 CI/CD 平台开发与维护,云原生技术研究与实现。同时也是 KubeSphere Talented Speaker。 + +## 视频回放 + + + +## 对应文章 + +整理中,敬请期待 \ No newline at end of file diff --git a/content/zh/conferences/rbac.md b/content/zh/conferences/rbac.md new file mode 100644 index 000000000..a07fe5e9c --- /dev/null +++ b/content/zh/conferences/rbac.md @@ -0,0 +1,24 @@ +--- +title: '基于 RBAC 和 Kubefed 的 Kubernetes 多集群和多租户管理' +author: '万宏明' +createTime: '2021-12-09' +snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/kubecon2021-hongming.png' +--- + +## 议题简介 + +软隔离是一种没有严格隔离不同用户、工作负载或应用程序的隔离形式。就 Kubernetes 而言,软隔离通常由 RBAC 和命名空间隔离。当集群管理员跨多个 Kubernetes 集群实现隔离时,会遇到许多挑战,如身份验证和授权、资源配额、网络策略、安全策略等。在本次演讲中,KubeSphere 维护人员分享了他们在设计隔离体系结构方面的经验和最佳实践。如何跨多个集群管理用户和身份验证。如何管理不同集群租户的资源配额。资源隔离机制以及如何跨多个集群授权资源。 + +## 分享者简介 + +万宏明,KubeSphere 研发工程师 & 核心贡献者,KubeSphere 多租户和安全团队负责人,专注于开源和云原生安全领域。 + +## 视频回放 + + + +## 对应文章 + +整理中,敬请期待 \ No newline at end of file diff --git a/content/zh/docs/access-control-and-account-management/_index.md b/content/zh/docs/access-control-and-account-management/_index.md index e619943b6..33466f417 100644 --- a/content/zh/docs/access-control-and-account-management/_index.md +++ b/content/zh/docs/access-control-and-account-management/_index.md @@ -10,4 +10,4 @@ icon: "/images/docs/docs.svg" --- -The multi-tenant architecture of KubeSphere underlies many key components running on the container platform. Different tenants are assigned with varied roles so that they can perform related tasks. This chapter outlines the multi-tenant system of KubeSphere and demonstrates how to configure authentication for third-party login. 
\ No newline at end of file +KubeSphere 的多租户架构是运行在容器平台上的许多关键组件的基础。不同的租户被分配不同的角色,以便他们可以执行相关的任务。本章概述了 KubeSphere 的多租户系统,并演示了如何为第三方登录配置身份验证。 \ No newline at end of file diff --git a/content/zh/docs/access-control-and-account-management/configuring-authentication.md b/content/zh/docs/access-control-and-account-management/configuring-authentication.md deleted file mode 100644 index 02a8191d9..000000000 --- a/content/zh/docs/access-control-and-account-management/configuring-authentication.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "认证配置" -keywords: "LDAP, identity provider" -description: "How to configure authentication" - -linkTitle: "认证配置" -weight: 12200 ---- - -KubeSphere 包含一个内置的 OAuth 服务和帐户系统。用户通过获取 OAuth 访问令牌以对 API 进行身份验证。 - -## 认证配置 - -作为管理员,您可以通过以下命令修改认证配置: - - -```bash -kubectl -n kubesphere-system edit cc ks-installer -``` - -*配置示例*: - -```yaml -apiVersion: installer.kubesphere.io/v1alpha1 -kind: ClusterConfiguration -metadata: - name: ks-installer -spec: - authentication: - jwtSecret: ******************************** - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m - oauthOptions: - accessTokenInactivityTimeout: 30m - accessTokenMaxAge: 1h - identityProviders: - - mappingMethod: auto - name: github - type: GitHubIdentityProvider - provider: -... 
-``` - -参数释意: - -* `authenticateRateLimiterMaxTries`: `authenticateLimiterDuration`指定的期间内允许的最大连续登录失败次数。如果用户连续登录失败次数达到限制,则该用户将被封禁。 - -* `authenticateRateLimiterDuration`: 作用于 `authenticateRateLimiterMaxTries`。 - -* `loginHistoryRetentionPeriod`: 用户登录记录保留期限,过期条目将被自动删除。 - -* `maximumClockSkew`: 控制执行对时间敏感的操作(例如验证用户令牌的过期时间)时允许的最大时钟偏移,默认值为10秒。 - -* `multipleLogin`: 允许多个用户同时从不同位置登录,默认值为 `true`。 - -* `jwtSecret`: 签发用户令牌的密钥,最小长度为32个字符。[多集群环境需要注意的事项](../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster)。 - -`oauthOptions`: OAuth settings - * `accessTokenMaxAge`: 访问令牌有效期。对于多集群环境中的成员集群,默认值为 `0h`,这意味着访问令牌永不过期。对于其他集群,默认值为 `2h`。 - * `accessTokenInactivityTimeout`: 令牌空闲超时时间。该值表示令牌过期后,刷新用户令牌最大的间隔时间,如果不在此时间窗口内刷新用户身份令牌,用户将需要重新登录以获得访问权。 - * `identityProviders`: Identity providers - * `name`: 身份提供者的名称。 - * `type`: 身份提供者的类型。 - * `mappingMethod`: 帐户映射方式. 值可以是 `auto` 或者 `lookup`。 - * 默认值为 `auto`, 通过第三方帐户登录时会自动创建关联帐户。 - * 如果值为 `lookup`, 你需要手动关联第三方帐户与KubeSphere帐户。 - * `provider`: Identity provider 配置,此部分中的字段根据身份提供的类型而异。 - -当您修改上述配置后,需要等待配置生效,可以通过以下命令查看相关进度及日志: - -```bash - kubectl -n kubesphere-system logs -l app=ks-installer -f -``` - -如果 `mappingMethod` 设置为 `lookup`, 可以通过以下命令进行帐户关联。 如果 `mappingMethod` 是 `auto` 你可以跳过这个部分。 - - ```bash - kubectl edit user - ``` - - ```yaml - labels: - iam.kubesphere.io/identify-provider: - iam.kubesphere.io/origin-uid: - ``` - -{{< notice note >}} - -多集群环境中,只需要在 Host 集群中进行配置。 - -{{}} - - -## 身份提供者 - -您可以在 `identityProviders` 部分中配置多个身份提供者(IdentityProvider, IdP)。身份提供者会对用户进行认证,并向 KubeSphere 提供身份令牌。 - -KubeSphere 默认提供了以下几种类型的身份提供者: - -* [LDAPIdentityProvider](../ldap-identity-provider) - -* [OIDCIdentityProvider](../oidc-identity-provider) - -* [GitHubIdentityProvider]() - -* [CASIdentityProvider]() - -* [AliyunIDaaSProvider]() - -您也可以拓展 KubeSphere [OAuth2 认证插件](../use-an-oauth2-identity-provider)与您的帐户系统进行集成。 diff --git a/content/zh/docs/access-control-and-account-management/external-authentication/_index.md 
b/content/zh/docs/access-control-and-account-management/external-authentication/_index.md new file mode 100644 index 000000000..fe7156336 --- /dev/null +++ b/content/zh/docs/access-control-and-account-management/external-authentication/_index.md @@ -0,0 +1,8 @@ +--- +title: "外部身份验证" +description: "了解如何在 KubeSphere 上配置第三方身份验证。" +layout: "single" + +linkTitle: "外部身份验证" +weight: 12200 +--- diff --git a/content/zh/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md b/content/zh/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md new file mode 100644 index 000000000..e5a1d1296 --- /dev/null +++ b/content/zh/docs/access-control-and-account-management/external-authentication/oidc-identity-provider.md @@ -0,0 +1,64 @@ +--- +title: "OIDC 身份提供者" +keywords: "OIDC, 身份提供者" +description: "如何使用外部 OIDC 身份提供者。" + +linkTitle: "OIDC 身份提供者" +weight: 12221 +--- + +## OIDC 身份提供者 + +[OpenID Connect](https://openid.net/connect/) 是一种基于 OAuth 2.0 系列规范的可互操作的身份认证协议。使用简单的 REST/JSON 消息流,其设计目标是“让简单的事情变得简单,让复杂的事情成为可能”。与之前的任何身份认证协议(例如 Keycloak、Okta、Dex、Auth0、Gluu、Casdoor 等)相比,开发人员集成起来非常容易。 + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 步骤 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 
在 `spec.authentication.jwtSecret` 字段下添加以下字段。 + + *使用 [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect) 的示例*: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: google + type: OIDCIdentityProvider + mappingMethod: auto + provider: + clientID: '********' + clientSecret: '********' + issuer: https://accounts.google.com + redirectURL: 'https://ks-console/oauth/redirect/google' + ``` + + 字段描述如下: + + | 参数 | 描述 | + | -------------------- | ------------------------------------------------------------ | + | clientID | 客户端 ID。 | + | clientSecret | 客户端密码。 | + | redirectURL | 重定向到 ks-console 的 URL,格式为:`https://<域名>/oauth/redirect/<身份提供者名称>`。URL 中的 `<身份提供者名称>` 对应 `oauthOptions:identityProviders:name` 的值。 | + | issuer | 定义客户端如何动态发现有关 OpenID 提供者的信息。 | + | preferredUsernameKey | 可配置的密钥,包含首选用户声明。此参数为可选参数。 | + | emailKey | 可配置的密钥,包含电子邮件声明。此参数为可选参数。 | + | getUserInfo | 使用 userinfo 端点获取令牌的附加声明。非常适用于上游返回 “thin” ID 令牌的场景。此参数为可选参数。 | + | insecureSkipVerify | 关闭 TLS 证书验证。 | + + + diff --git a/content/zh/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md b/content/zh/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md new file mode 100644 index 000000000..4c8a8fd53 --- /dev/null +++ b/content/zh/docs/access-control-and-account-management/external-authentication/set-up-external-authentication.md @@ -0,0 +1,112 @@ +--- +title: "设置外部身份验证" +keywords: "LDAP, 外部, 第三方, 身份验证" +description: "如何在 KubeSphere 上设置外部身份验证。" + +linkTitle: "设置外部身份验证" +weight: 12210 +--- + +本文档描述了如何在 KubeSphere 上使用外部身份提供者,例如 LDAP 服务或 Active Directory 服务。 + +KubeSphere 提供了一个内置的 OAuth 服务。用户通过获取 OAuth 访问令牌以对 API 进行身份验证。作为 KubeSphere 管理员,您可以编辑 CRD `ClusterConfiguration` 中的 `ks-installer` 来配置 OAuth 
并指定身份提供者。 + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + + +## 步骤 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 在 `spec.authentication.jwtSecret` 字段下添加以下字段。 + + 示例: + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + loginHistoryRetentionPeriod: 168h + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + + 字段描述如下: + + * `jwtSecret`:签发用户令牌的密钥。在多集群环境下,所有的集群必须[使用相同的密钥](../../../multicluster-management/enable-multicluster/direct-connection/#prepare-a-member-cluster)。 + * `authenticateRateLimiterMaxTries`:`authenticateLimiterDuration` 指定的期间内允许的最大连续登录失败次数。如果用户连续登录失败次数达到限制,则该用户将被封禁。 + * `authenticateRateLimiterDuration`:`authenticateRateLimiterMaxTries` 适用的时间段。 + * `loginHistoryRetentionPeriod`:用户登录记录保留期限,过期的登录记录将被自动删除。 + * `maximumClockSkew`:时间敏感操作(例如验证用户令牌的过期时间)的最大时钟偏差,默认值为10秒。 + * `multipleLogin`:是否允许多个用户同时从不同位置登录,默认值为 `true`。 + * `oauthOptions`: + * `accessTokenMaxAge`:访问令牌有效期。对于多集群环境中的成员集群,默认值为 `0h`,这意味着访问令牌永不过期。对于其他集群,默认值为 `2h`。 + * `accessTokenInactivityTimeout`:令牌空闲超时时间。该值表示令牌过期后,刷新用户令牌最大的间隔时间,如果不在此时间窗口内刷新用户身份令牌,用户将需要重新登录以获得访问权。 + * `identityProviders`: + * `name`:身份提供者的名称。 + * `type`:身份提供者的类型。 + * `mappingMethod`:帐户映射方式,值可以是 `auto` 或者 `lookup`。 + * 如果值为 `auto`(默认),需要指定新的用户名。通过第三方帐户登录时,KubeSphere 会根据用户名自动创建关联帐户。 + * 如果值为 `lookup`,需要执行步骤 3 以手动关联第三方帐户与 KubeSphere 帐户。 + * 
`provider`:身份提供者信息。此部分中的字段根据身份提供者的类型而异。 + +3. 如果 `mappingMethod` 设置为 `lookup`,可以运行以下命令并添加标签来进行帐户关联。如果 `mappingMethod` 是 `auto` 可以跳过这个部分。 + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +4. 字段配置完成后,保存修改,然后等待 ks-installer 重启完成。 + + {{< notice note >}} + + 多集群环境中,只需要在主集群中进行配置。 + + {{}} + + +## 身份提供者 + +您可以在 `identityProviders` 部分中配置多个身份提供者(IdPs)。身份提供者会对用户进行认证,并向 KubeSphere 提供身份令牌。 + +KubeSphere 默认提供了以下几种类型的身份提供者: + +* [LDAP Identity Provider](../use-an-ldap-service) + +* [OIDC Identity Provider](../oidc-identity-provider) + +* [GitHub Identity Provider]() + +* [CAS Identity Provider]() + +* [Aliyun IDaaS Provider]() + +您也可以拓展 KubeSphere [OAuth2 认证插件](../use-an-oauth2-identity-provider) 与您的帐户系统进行集成。 diff --git a/content/zh/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md b/content/zh/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md new file mode 100644 index 000000000..249a4b70c --- /dev/null +++ b/content/zh/docs/access-control-and-account-management/external-authentication/use-an-ldap-service.md @@ -0,0 +1,104 @@ +--- +title: "LDAP身份提供者" +keywords: "LDAP, 身份提供者, 外部, 身份验证" +description: "如何使用 LDAP 服务。" + +linkTitle: "LDAP身份提供者" +weight: 12220 +--- + +本文档描述了如何使用 LDAP 服务作为外部身份提供者,允许您根据 LDAP 服务对用户进行身份验证。 + +## 准备工作 + +* 您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 +* 您需要获取 LDAP 服务的管理员专有名称(DN)和管理员密码。 + +## 步骤 + +1. 
以 `admin` 身份登录 KubeSphere,将光标移动到右下角 ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + + 示例: + + ```yaml + spec: + authentication: + jwtSecret: '' + maximumClockSkew: 10s + multipleLogin: true + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: LDAP + type: LDAPIdentityProvider + mappingMethod: auto + provider: + host: 192.168.0.2:389 + managerDN: uid=root,cn=users,dc=nas + managerPassword: ******** + userSearchBase: cn=users,dc=nas + loginAttribute: uid + mailAttribute: mail + ``` + +2. 在 `spec:authentication` 部分配置 `oauthOptions:identityProviders` 以外的字段信息请参阅[设置外部身份认证](../set-up-external-authentication/)。 + +3. 在 `oauthOptions:identityProviders` 部分配置字段。 + + * `name`: 用户定义的 LDAP 服务名称。 + * `type`: 必须将该值设置为 `LDAPIdentityProvider` 才能将 LDAP 服务用作身份提供者。 + * `mappingMethod`: 帐户映射方式,值可以是 `auto` 或者 `lookup`。 + * 如果值为 `auto`(默认),需要指定新的用户名。KubeSphere 根据用户名自动创建并关联 LDAP 用户。 + * 如果值为 `lookup`,需要执行步骤 4 以手动关联现有 KubeSphere 用户和 LDAP 用户。 + * `provider`: + * `host`: LDAP 服务的地址和端口号。 + * `managerDN`: 用于绑定到 LDAP 目录的 DN 。 + * `managerPassword`: `managerDN` 对应的密码。 + * `userSearchBase`: 用户搜索基。设置为所有 LDAP 用户所在目录级别的 DN 。 + * `loginAttribute`: 标识 LDAP 用户的属性。 + * `mailAttribute`: 标识 LDAP 用户的电子邮件地址的属性。 + +4. 如果 `mappingMethod` 设置为 `lookup`,可以运行以下命令并添加标签来进行帐户关联。如果 `mappingMethod` 是 `auto` 可以跳过这个部分。 + + ```bash + kubectl edit user + ``` + + ```yaml + labels: + iam.kubesphere.io/identify-provider: + iam.kubesphere.io/origin-uid: + ``` + +5. 字段配置完成后,保存修改,然后等待 ks-installer 完成重启。 + + {{< notice note >}} + + KubeSphere Web 控制台在 ks-installer 重新启动期间不可用。请等待重启完成。 + + {{}} + +6. 如果您使用 KubeSphere 3.2.0,请在配置 LDAP 之后执行以下命令并等待至 `ks-installer` 成功运行: + + ```bash + kubectl -n kubesphere-system set image deployment/ks-apiserver *=kubesphere/ks-apiserver:v3.2.1 + ``` + + {{< notice note >}} + + 如果您使用 KubeSphere 3.2.1,请跳过该步骤。 + + {{}} + +7. 
进入 KubeSphere 登录页面,输入 LDAP 用户的用户名和密码登录。 + + {{< notice note >}} + + LDAP 用户的用户名是 `loginAttribute` 指定的属性值。 + + {{}} diff --git a/content/zh/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md b/content/zh/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md new file mode 100644 index 000000000..9d6e99594 --- /dev/null +++ b/content/zh/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider.md @@ -0,0 +1,130 @@ +--- +title: "OAuth 2.0身份提供者" +keywords: 'Kubernetes, KubeSphere, OAuth2, Identity Provider' +description: '如何使用外部 OAuth2 身份提供者。' +linkTitle: "OAuth 2.0身份提供者" +weight: 12230 +--- + +本文档介绍了如何使用基于 OAuth 2.0 协议的外部身份提供者。 + +下图显示了 KubeSphere 与外部 OAuth 2.0 身份提供者之间的身份验证过程。 + +![oauth2](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/oauth2.svg) + +## 准备工作 + +您需要部署一个 Kubernetes 集群,并在集群中安装 KubeSphere。有关详细信息,请参阅[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 + +## 开发 OAuth 2.0 插件 + +{{< notice note >}} + +KubeSphere 提供了两个内置的 OAuth 2.0 插件:GitHub 的 [GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) 和阿里云 IDaaS 的 [AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/tree/release-3.1/pkg/apiserver/authentication/identityprovider/aliyunidaas) ,可以根据内置的插件开发其他插件。 + +{{}} + +1. 在本地克隆 [KubeSphere](https://github.com/kubesphere/kubesphere) ,进入本地 KubeSphere 仓库,并在 `/pkg/apiserver/authentication/identityprovider/` 目录下创建一个插件的包。 + +2. 在插件包中,实现如下接口: + + ```go + // /pkg/apiserver/authentication/identityprovider/oauth_provider.go + type OAuthProvider interface { + // Exchange identity with a remote server. 
+ IdentityExchange(code string) (Identity, error) + } + + type OAuthProviderFactory interface { + // Return the identity provider type. + Type() string + // Apply settings from kubesphere-config. + Create(options oauth.DynamicOptions) (OAuthProvider, error) + } + ``` + + ```go + // /pkg/apiserver/authentication/identityprovider/identity_provider.go + type Identity interface { + // (Mandatory) Return the identifier of the user at the identity provider. + GetUserID() string + // (Optional) Return the name of the user to be referred as on KubeSphere. + GetUsername() string + // (Optional) Return the email address of the user. + GetEmail() string + } + ``` + +3. 在插件包的 `init()` 函数中注册插件。 + + ```go + // Custom plugin package + func init() { + // Change to the actual name of the struct that + // implements the OAuthProviderFactory interface. + identityprovider.RegisterOAuthProvider(&{}) + } + ``` + +4. 在 `/pkg/apiserver/authentication/options/authenticate_options.go` 中导入插件包。 + + ```go + // Change to the actual name of your plugin package. + import ( + ... + _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/" + ... + ) + ``` + +5. [构建 ks-apiserver 镜像](https://github.com/kubesphere/community/blob/104bab42f67094930f2ca87c603b7c6365cd092a/developer-guide/development/quickstart.md) 并部署到您的集群中。 + +## 集成身份提供者 + +1. 以 `admin` 身份登录 KubeSphere,将光标移动到右下角 ,点击 **kubectl**,然后执行以下命令来编辑 CRD `ClusterConfiguration` 中的 `ks-installer`: + + ```bash + kubectl -n kubesphere-system edit cc ks-installer + ``` + +2. 在 `spec:authentication` 部分配置的 `oauthOptions:identityProviders` 以外的字段信息请参阅[设置外部身份认证](../set-up-external-authentication/)。 + +3. 
根据开发的身份提供者插件来配置 `oauthOptions:identityProviders` 中的字段。 + + 以下是使用 GitHub 作为外部身份提供者的配置示例。详情请参阅 [GitHub 官方文档](https://docs.github.com/en/developers/apps/building-oauth-apps)和 [GitHubIdentityProvider 源代码](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) 。 + + ```yaml + spec: + authentication: + jwtSecret: '' + authenticateRateLimiterMaxTries: 10 + authenticateRateLimiterDuration: 10m0s + oauthOptions: + accessTokenMaxAge: 1h + accessTokenInactivityTimeout: 30m + identityProviders: + - name: github + type: GitHubIdentityProvider + mappingMethod: auto + provider: + clientID: '******' + clientSecret: '******' + redirectURL: 'https://ks-console/oauth/redirect/github' + ``` + + 同样,您也可以使用阿里云 IDaaS 作为外部身份提供者。详情请参阅[阿里云 IDaaS 文档](https://www.alibabacloud.com/help/product/111120.htm?spm=a3c0i.14898238.2766395700.1.62081da1NlxYV0)和 [AliyunIDaasProvider 源代码](https://github.com/kubesphere/kubesphere/tree/release-3.1/pkg/apiserver/authentication/identityprovider/aliyunidaas)。 + +4. 字段配置完成后,保存修改,然后等待 ks-installer 完成重启。 + + {{< notice note >}} + + KubeSphere Web 控制台在 ks-installer 重新启动期间不可用。请等待重启完成。 + + {{}} + +5. 进入 KubeSphere 登录界面,点击 **Log In with XXX** (例如,**Log In with GitHub**)。 + +6. 
在外部身份提供者的登录界面,输入身份提供者配置的用户名和密码,登录 KubeSphere 。 + + ![github-login-page](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/github-login-page.png) + diff --git a/content/zh/docs/access-control-and-account-management/ldap-identity-provider.md b/content/zh/docs/access-control-and-account-management/ldap-identity-provider.md deleted file mode 100644 index 6b86d6a45..000000000 --- a/content/zh/docs/access-control-and-account-management/ldap-identity-provider.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "LDAP身份提供者" -keywords: "LDAP, identity provider" -description: "How to configure authentication" - -linkTitle: "LDAP身份提供者" -weight: 12201 ---- - -## LDAP Authentication - -Set LDAPIdentityProvider in the identityProviders section to validate username and password against an LDAPv3 server using simple bind authentication. - -During authentication, the LDAP directory is searched for an entry that matches the provided username. If a single unique match is found, a simple bind is attempted using the DN of the entry plus the provided password. - -*Example Configuration Using LDAPIdentityProvider*: - -```yaml -apiVersion: installer.kubesphere.io/v1alpha1 -kind: ClusterConfiguration -metadata: - name: ks-installer -spec: - authentication: - jwtSecret: ******************************** - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m - oauthOptions: - accessTokenInactivityTimeout: 30m - accessTokenMaxAge: 1h - identityProviders: - - name: ldap - type: LDAPIdentityProvider - mappingMethod: auto - provider: - host: 192.168.0.2:389 - managerDN: uid=root,cn=users,dc=nas - managerPassword: ****** - userSearchBase: cn=users,dc=nas - loginAttribute: uid - mailAttribute: mail -``` - -For the above example: - -| Parameter | Description | -|-----------|-------------| -| insecureSkipVerify | Used to turn off TLS certificate checks. | -| startTLS | If specified, connections will use the ldaps:// protocol. 
| -| rootCA | Path to a trusted root certificate file. Default: use the host's root CA. | -| rootCAData | A raw certificate file can also be provided inline. Base64 encoded PEM file. | -| host | The name and port of the LDAP server. | -| managerDN | DN to use to bind during the search phase. | -| managerPassword | Password to use to bind during the search phase. | -| userSearchBase | The search base is the distinguished name (DN) of a level of the directory tree below which all users can be found. | -| userSearchFilter | LDAP filter used to identify objects of type user. e.g. (objectClass=person) | -| loginAttribute | User naming attributes identify user objects, will be mapped to KubeSphere account name. | -| mailAttribute | The mail attribute will be mapped to the KubeSphere account. | \ No newline at end of file diff --git a/content/zh/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md b/content/zh/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md index b08f91357..9f4ec8774 100644 --- a/content/zh/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md +++ b/content/zh/docs/access-control-and-account-management/multi-tenancy-in-kubesphere.md @@ -6,7 +6,6 @@ linkTitle: "KubeSphere 中的多租户" weight: 12100 --- - Kubernetes 解决了应用编排、容器调度的难题,极大地提高了资源的利用率。有别于传统的集群运维方式,在使用 Kubernetes 的过程中,企业和个人用户在资源共享和安全性方面均面临着诸多挑战。 首当其冲的就是企业环境中多租户形态该如何定义,租户的安全边界该如何划分。Kubernetes 社区[关于多租户的讨论](https://docs.google.com/document/d/1fj3yzmeU2eU8ZNBCUJG97dk_wC7228-e_MmdcmTNrZY)从未停歇,但到目前为止最终的形态尚无定论。 @@ -37,12 +36,10 @@ Kubernetes 解决了应用编排、容器调度的难题,极大地提高了资 与 Kubernetes 相同,KubeSphere 通过 RBAC 对用户的权限加以控制,实现逻辑层面的资源隔离。 -![rbac](/images/docs/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png) - KubeSphere 中的权限控制分为平台、企业空间、项目三个层级,通过角色来控制用户在不同层级的资源访问权限。 1. [平台角色](../../quick-start/create-workspace-and-project/):主要控制用户对平台资源的访问权限,如集群的管理、企业空间的管理、平台用户的管理等。 -2. 
[企业空间角色](../../workspace-administration/role-and-member-management/):主要控制企业空间成员在企业空间下的资源访问权限,如企业空间下项目、DevOps 工程的管理等。 +2. [企业空间角色](../../workspace-administration/role-and-member-management/):主要控制企业空间成员在企业空间下的资源访问权限,如企业空间下项目、DevOps 项目的管理等。 3. [项目角色](../../project-administration/role-and-member-management/):主要控制项目下资源的访问权限,如工作负载的管理、流水线的管理等。 ### 网络隔离 diff --git a/content/zh/docs/access-control-and-account-management/oidc-identity-provider.md b/content/zh/docs/access-control-and-account-management/oidc-identity-provider.md deleted file mode 100644 index 0e56d261f..000000000 --- a/content/zh/docs/access-control-and-account-management/oidc-identity-provider.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "OIDC身份提供者" -keywords: "OIDC, identity provider" -description: "How to configure authentication" - -linkTitle: "OIDC身份提供者" -weight: 12202 ---- - -## OIDC Identity Provider - -[OpenID Connect](https://openid.net/connect/) is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. It uses straightforward REST/JSON message flows with a design goal of “making simple things simple and complicated things possible”. It’s uniquely easy for developers to integrate, compared to any preceding Identity protocol, such as Keycloak, Okta, Dex, Auth0, Gluu, and many more. 
- -*Example of using [Google Identity Platform](https://developers.google.com/identity/protocols/oauth2/openid-connect)*: - -```yaml -apiVersion: v1 -data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - jwtSecret: "********" - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: google - type: OIDCIdentityProvider - mappingMethod: auto - provider: - clientID: '********' - clientSecret: '********' - issuer: https://accounts.google.com - redirectURL: 'http://ks-console/oauth/redirect/google' -kind: ConfigMap -name: kubesphere-config -namespace: kubesphere-system -``` - -For the above example: - -| Parameter | Description | -| ----------| ----------- | -| clientID | The OAuth2 client ID. | -| clientSecret | The OAuth2 client secret. | -| redirectURL | The redirected URL to ks-console. | -| issuer | Defines how Clients dynamically discover information about OpenID Providers. | -| preferredUsernameKey | Configurable key which contains the preferred username claims. | -| emailKey | Configurable key which contains the email claims. | -| getUserInfo | GetUserInfo uses the userinfo endpoint to get additional claims for the token. This is especially useful where upstreams return "thin" id tokens. | -| insecureSkipVerify | Used to turn off TLS certificate verify. | \ No newline at end of file diff --git a/content/zh/docs/access-control-and-account-management/use-an-oauth2-identity-provider.md b/content/zh/docs/access-control-and-account-management/use-an-oauth2-identity-provider.md deleted file mode 100644 index f0db1ec2d..000000000 --- a/content/zh/docs/access-control-and-account-management/use-an-oauth2-identity-provider.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: "Use an OAuth 2.0 Identity Provider" -keywords: 'Kubernetes, KubeSphere, OAuth2, Identity Provider' -description: 'How to use an external OAuth2 identity provider.' 
-linkTitle: "Use an OAuth 2.0 Identity Provider" -weight: 12203 ---- - -This document describes how to use an external identity provider based on the OAuth 2.0 protocol. - -The following figure shows the authentication process between KubeSphere and an external OAuth 2.0 identity provider. - -![oauth2](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/oauth2.svg) - -## Prerequisites - -You need to deploy a Kubernetes cluster and install KubeSphere in the cluster. For details, see [Installing on Linux](/docs/installing-on-linux/) and [Installing on Kubernetes](/docs/installing-on-kubernetes/). - -## Develop an OAuth 2.0 Plugin - -{{< notice note >}} - -KubeSphere provides two built-in OAuth 2.0 plugins: [GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) for GitHub and [AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) for Alibaba Cloud IDaaS. You can develop other plugins according to the built-in plugins. - -{{}} - -1. Clone the [KubeSphere repository](https://github.com/kubesphere/kubesphere) on your local machine, go to the local KubeSphere repository, and create a package for your plugin in the `/pkg/apiserver/authentication/identityprovider/` directory. - -2. In the plugin package, implement the following interfaces: - - ```go - // /pkg/apiserver/authentication/identityprovider/oauth_provider.go - type OAuthProvider interface { - // Exchange identity with a remote server. - IdentityExchange(code string) (Identity, error) - } - - type OAuthProviderFactory interface { - // Return the identity provider type. - Type() string - // Apply settings from kubesphere-config. 
- Create(options oauth.DynamicOptions) (OAuthProvider, error) - } - ``` - - ```go - // /pkg/apiserver/authentication/identityprovider/identity_provider.go - type Identity interface { - // (Mandatory) Return the identifier of the user at the identity provider. - GetUserID() string - // (Optional) Return the name of the user to be referred as on KubeSphere. - GetUsername() string - // (Optional) Return the email address of the user. - GetEmail() string - } - ``` - -3. Register the plugin in the `init()` function of the plugin package. - - ```go - // Custom plugin package - func init() { - // Change to the actual name of the struct that - // implements the OAuthProviderFactory interface. - identityprovider.RegisterOAuthProvider(&{}) - } - ``` - -4. Import the plugin package in `/pkg/apiserver/authentication/options/authenticate_options.go`. - - ```go - // Change to the actual name of your plugin package. - import ( - ... - _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/" - ... - ) - ``` - -5. [Build the image of ks-apiserver](https://github.com/kubesphere/community/blob/104bab42f67094930f2ca87c603b7c6365cd092a/developer-guide/development/quickstart.md) and deploy it in your cluster. - -## Integrate an Identity Provider with KubeSphere - -1. Log in to KubeSphere as `admin`, move the cursor to in the bottom-right corner, click **Kubectl**, and run the following command to edit the `kubesphere-config` ConfigMap: - - ```bash - kubectl -n kubesphere-system edit cm kubesphere-config - ``` - -2. Configure fields other than `oauthOptions:identityProviders` in the `data:kubesphere.yaml:authentication` section. For details, see [Set Up External Authentication](../set-up-external-authentication/). - -3. Configure fields in `oauthOptions:identityProviders` section according to the identity provider plugin you have developed. - - The following is a configuration example that uses GitHub as an external identity provider. 
For details, see the [official GitHub documentation](https://docs.github.com/en/developers/apps/building-oauth-apps) and the [source code of the GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin. - - ```yaml - apiVersion: v1 - data: - kubesphere.yaml: | - authentication: - authenticateRateLimiterMaxTries: 10 - authenticateRateLimiterDuration: 10m0s - jwtSecret: '******' - oauthOptions: - accessTokenMaxAge: 1h - accessTokenInactivityTimeout: 30m - identityProviders: - - name: github - type: GitHubIdentityProvider - mappingMethod: auto - provider: - clientID: '******' - clientSecret: '******' - redirectURL: 'https://ks-console/oauth/redirect/github' - ``` - - Similarly, you can also use Alibaba Cloud IDaaS as an external identity provider. For details, see the official [Alibaba IDaaS documentation](https://www.alibabacloud.com/help/product/111120.htm?spm=a3c0i.14898238.2766395700.1.62081da1NlxYV0) and the [source code of the AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/release-3.1/pkg/apiserver/authentication/identityprovider/github/github.go) plugin. - -4. After the `kubesphere-config` ConfigMap is modified, run the following command to restart ks-apiserver. - - ```bash - kubectl -n kubesphere-system rollout restart deploy/ks-apiserver - ``` - - {{< notice note >}} - - The KubeSphere web console is unavailable during the restart of ks-apiserver. Please wait until the restart is complete. - - {{}} - -5. Go to the KubeSphere login page, click **Log In with XXX** (for example, **Log In with GitHub**). - - ![github-login-page](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/kubesphere-login-page.png) - -6. On the login page of the external identity provider, enter the username and password of a user configured at the identity provider to log in to KubeSphere. 
- - ![github-login-page](/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/github-login-page.png) - diff --git a/content/zh/docs/application-store/app-developer-guide/helm-specification.md b/content/zh/docs/application-store/app-developer-guide/helm-specification.md index 5de516e15..c33f28596 100644 --- a/content/zh/docs/application-store/app-developer-guide/helm-specification.md +++ b/content/zh/docs/application-store/app-developer-guide/helm-specification.md @@ -20,7 +20,7 @@ chartname/ values.yaml # 该 Chart 的默认配置值。 values.schema.json # (可选)向 values.yaml 文件添加结构的 JSON Schema。 charts/ # 一个目录,包含该 Chart 所依赖的任意 Chart。 - crds/ # 自定义资源定义。 + crds/ # 定制资源定义。 templates/ # 模板的目录,若提供相应值便可以生成有效的 Kubernetes 配置文件。 templates/NOTES.txt # (可选)包含使用说明的纯文本文件。 ``` diff --git a/content/zh/docs/application-store/app-lifecycle-management.md b/content/zh/docs/application-store/app-lifecycle-management.md index bc8e795f8..663a14500 100644 --- a/content/zh/docs/application-store/app-lifecycle-management.md +++ b/content/zh/docs/application-store/app-lifecycle-management.md @@ -15,10 +15,16 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 本教程使用 [Redis](https://redis.io/) 作为示例应用程序,演示如何进行应用全生命周期管理,包括提交、审核、测试、发布、升级和下架。 +## 视频演示 + + + ## 准备工作 - 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目以及一个帐户 (`project-regular`)。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -26,31 +32,21 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 首先,您需要创建两个帐户,一个是 ISV 的帐户 (`isv`),另一个是应用技术审核员的帐户 (`reviewer`)。 -1. 使用 `admin` 帐户登录 KubeSphere 控制台。点击左上角的**平台管理**,选择**访问控制**。转到**帐户角色**,点击**创建**。 - - ![创建角色](/images/docs/zh-cn/appstore/application-lifecycle-management/create-role-2.PNG) +1. 
使用 `admin` 帐户登录 KubeSphere 控制台。点击左上角的**平台管理**,选择**访问控制**。转到**平台角色**,点击**创建**。 2. 为角色设置一个名称,例如 `app-review`,然后点击**编辑权限**。 - ![设置名称](/images/docs/zh-cn/appstore/application-lifecycle-management/app-review-name-3.PNG) - 3. 转到**应用管理**,选择权限列表中的**应用商店管理**和**应用商店查看**,然后点击**确定**。 - ![创建角色](/images/docs/zh-cn/appstore/application-lifecycle-management/create-roles-4.PNG) - {{< notice note >}} - 被授予 `app-review` 角色的帐户能够查看平台上的应用商店并管理应用,包括审核和下架应用。 + 被授予 `app-review` 角色的用户能够查看平台上的应用商店并管理应用,包括审核和下架应用。 {{}} -4. 创建角色后,您需要创建一个帐户,并授予 `app-review` 角色。转到**帐户管理**,点击**创建**。输入必需的信息,然后点击**确定**。 +4. 创建角色后,您需要创建一个用户,并授予 `app-review` 角色。转到**用户**,点击**创建**。输入必需的信息,然后点击**确定**。 - ![创建审核帐户](/images/docs/zh-cn/appstore/application-lifecycle-management/create-review-role-5.PNG) - -5. 再创建另一个帐户 `isv`,把 `platform-regular` 角色授予它。 - - ![帐户已创建](/images/docs/zh-cn/appstore/application-lifecycle-management/account-ready-6.PNG) +5. 再创建另一个用户 `isv`,把 `platform-regular` 角色授予它。 6. 邀请上面创建好的两个帐户进入现有的企业空间,例如 `demo-workspace`,并授予它们 `workspace-admin` 角色。 @@ -58,17 +54,13 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 1. 以 `isv` 身份登录控制台,转到您的企业空间。您需要上传示例应用 Redis 至该企业空间,供后续使用。首先,下载应用 [Redis 11.3.4](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-11.3.4.tgz),然后转到**应用模板**,点击**上传模板**。 - ![上传应用](/images/docs/zh-cn/appstore/application-lifecycle-management/upload-app-7.PNG) - {{< notice note >}} 在本示例中,稍后会上传新版本的 Redis 来演示升级功能。 {{}} -2. 在弹出的对话框中,点击**上传 Helm 配置文件**上传 Chart 文件。点击**确定**继续。 - - ![上传模板](/images/docs/zh-cn/appstore/application-lifecycle-management/upload-template-8.PNG) +2. 在弹出的对话框中,点击**上传 Helm Chart** 上传 Chart 文件。点击**确定**继续。 3. **应用信息**下显示了应用的基本信息。要上传应用的图标,点击**上传图标**。您也可以跳过上传图标,直接点击**确定**。 @@ -78,23 +70,13 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( {{}} - ![上传图标](/images/docs/zh-cn/appstore/application-lifecycle-management/upload-icon-9.PNG) - 4. 
成功上传后,模板列表中会列出应用,状态为**开发中**,意味着该应用正在开发中。上传的应用对同一企业空间下的所有成员均可见。 - ![应用开发中](/images/docs/zh-cn/appstore/application-lifecycle-management/app-draft-10.PNG) - -5. 点击列表中的 Redis 进入应用模板详情页面。您可以点击**编辑信息**来编辑该应用的基本信息。 - - ![编辑应用模板](/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-template-11.PNG) +5. 点击列表中的 Redis 进入应用模板详情页面。您可以点击**编辑**来编辑该应用的基本信息。 6. 您可以通过在弹出窗口中指定字段来自定义应用的基本信息。 - ![编辑应用信息](/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-information-12.PNG) - -7. 点击**确定**保存更改,然后您可以通过将其部署到 Kubernetes 来测试该应用程序。点击待提交版本展开菜单,选择**测试部署**。 - - ![测试部署](/images/docs/zh-cn/appstore/application-lifecycle-management/test-deployment-13.PNG) +7. 点击**确定**保存更改,然后您可以通过将其部署到 Kubernetes 来测试该应用程序。点击待提交版本展开菜单,选择**安装**。 {{< notice note >}} @@ -102,11 +84,7 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( {{}} -8. 选择要部署应用的集群和项目,为应用设置不同的配置,然后点击**部署**。 - - ![部署位置](/images/docs/zh-cn/appstore/application-lifecycle-management/deployment-place-14.PNG) - - ![部署应用](/images/docs/zh-cn/appstore/application-lifecycle-management/deploying-app-15.PNG) +8. 选择要部署应用的集群和项目,为应用设置不同的配置,然后点击**安装**。 {{< notice note >}} @@ -114,13 +92,9 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( {{}} -9. 稍等几分钟,切换到**部署实例**选项卡。您会看到 Redis 已经部署成功。 +9. 稍等几分钟,切换到**应用实例**选项卡。您会看到 Redis 已经部署成功。 - ![部署实例成功](/images/docs/zh-cn/appstore/application-lifecycle-management/deployed-instance-success-16.PNG) - -10. 测试应用并且没有发现问题后,便可以点击**提交审核**,提交该应用程序进行审核。 - - ![提交审核](/images/docs/zh-cn/appstore/application-lifecycle-management/submit-for-review-17.PNG) +10. 测试应用并且没有发现问题后,便可以点击**提交发布**,提交该应用程序进行发布。 {{< notice note >}} @@ -128,20 +102,15 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( {{}} -11. 应用提交后,它的状态会变成**等待审核**。现在,应用审核员便可以进行审核。 +11. 应用提交后,它的状态会变成**已提交**。现在,应用审核员便可以进行审核。 - ![应用已提交](/images/docs/zh-cn/appstore/application-lifecycle-management/submitted-app-18.PNG) -### 步骤三:审核应用程序 +### 步骤三:发布应用程序 -1. 
登出控制台,然后以 `reviewer` 身份重新登录 KubeSphere。点击左上角的**平台管理**,选择**应用商店管理**。在**应用审核**页面,上一步中提交的应用会显示在**待处理**选项卡下。 - - ![应用待审核](/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-reviewed-19.PNG) +1. 登出控制台,然后以 `reviewer` 身份重新登录 KubeSphere。点击左上角的**平台管理**,选择**应用商店管理**。在**应用发布**页面,上一步中提交的应用会显示在**待发布**选项卡下。 2. 点击该应用进行审核,在弹出窗口中查看应用信息、介绍、配置文件和更新日志。 - ![审核中](/images/docs/zh-cn/appstore/application-lifecycle-management/reviewing-20.PNG) - 3. 审核员的职责是决定该应用是否符合发布至应用商店的标准。点击**通过**来批准,或者点击**拒绝**来拒绝提交的应用。 ### 步骤四:发布应用程序至应用商店 @@ -152,24 +121,18 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( ![应用模板页面](/images/docs/zh-cn/appstore/application-lifecycle-management/app-templates-page-21.PNG) -2. 在**应用审核**下,您可以查看应用状态。**已上架**意味着它在应用商店中可用。 - - ![应用已上架](/images/docs/zh-cn/appstore/application-lifecycle-management/app-active-22.PNG) +2. 在**应用发布**下,您可以查看应用状态。**已上架**意味着它在应用商店中可用。 3. 点击**在商店查看**转到应用商店的**应用信息**页面,或者点击左上角的**应用商店**也可以查看该应用。 - ![redis](/images/docs/zh-cn/appstore/application-lifecycle-management/redis-23.PNG) - {{< notice note >}} 您可能会在应用商店看到两个 Redis 应用,其中一个是 KubeSphere 中的内置应用。请注意,新发布的应用会显示在应用商店列表的开头。 {{}} -4. 现在,企业空间中的用户可以从应用商店中部署 Redis。要将应用部署至 Kubernetes,请点击应用转到**应用信息**页面,然后点击**部署**。 +4. 现在,企业空间中的用户可以从应用商店中部署 Redis。要将应用部署至 Kubernetes,请点击应用转到**应用信息**页面,然后点击**安装**。 - ![部署 redis](/images/docs/zh-cn/appstore/application-lifecycle-management/deploy-redis-24.PNG) - {{< notice note >}} 如果您在部署应用时遇到问题,**状态**栏显示为**失败**,您可以将光标移至**失败**图标上方查看错误信息。 @@ -182,12 +145,8 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 1. 以 `reviewer` 身份登录 KubeSphere。要创建分类,请转到**应用商店管理**页面,再点击**应用分类**页面中的 。 - ![应用分类](/images/docs/zh-cn/appstore/application-lifecycle-management/app-category-25.PNG) - 2. 
在弹出的对话框中设置分类名称和图标,然后点击**确定**。对于 Redis,您可以将**分类名称**设置为 `Database`。 - ![设置应用分类](/images/docs/zh-cn/appstore/application-lifecycle-management/set-app-type-26.PNG) - {{< notice note >}} 通常,应用审核员会提前创建必要的分类,ISV 会选择应用所属的分类,然后提交审核。新创建的分类中没有应用。 @@ -196,35 +155,23 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 3. 创建好分类后,您可以给您的应用分配分类。在**未分类**中选择 Redis,点击**调整分类**。 - ![设置分类](/images/docs/zh-cn/appstore/application-lifecycle-management/set-category-for-app-27.PNG) - 4. 在弹出对话框的下拉列表中选择分类 (**Database**) 然后点击**确定**。 - ![确认分类](/images/docs/zh-cn/appstore/application-lifecycle-management/confirm-category-28.PNG) - 5. 该应用便会显示在对应分类中。 - ![分类显示](/images/docs/zh-cn/appstore/application-lifecycle-management/app-in-category-list-expected-29.PNG) ### 步骤六:添加新版本 要让企业空间用户能够更新应用,您需要先向 KubeSphere 添加新的应用版本。按照下列步骤为示例应用添加新版本。 -1. 再次以 `isv` 身份登录 KubeSphere,搜寻到**应用模板**,点击列表中的 Redis 应用。 +1. 再次以 `isv` 身份登录 KubeSphere,点击**应用模板**,点击列表中的 Redis 应用。 -2. 下载 [Redis 12.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-12.0.0.tgz),这是 Redis 的一个新版本,本教程用它来演示。在**版本**选项卡中点击右侧的**添加版本**,上传您刚刚下载的文件包。 +2. 下载 [Redis 12.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/redis-12.0.0.tgz),这是 Redis 的一个新版本,本教程用它来演示。在**版本**选项卡中点击右侧的**上传新版本**,上传您刚刚下载的文件包。 - ![新版本](/images/docs/zh-cn/appstore/application-lifecycle-management/new-version-redis-30.PNG) - -3. 点击**上传 Helm 配置文件**,上传完成后点击**确定**。 - - ![上传 redis 新版本](/images/docs/zh-cn/appstore/application-lifecycle-management/upload-new-redis-version-31.PNG) +3. 点击**上传 Helm Chart**,上传完成后点击**确定**。 4. 新的应用版本会显示在版本列表中。您可以通过点击来展开菜单并测试新的版本。另外,您也可以提交审核并发布至应用商店,操作步骤和上面说明的一样。 - ![上传新版本](/images/docs/zh-cn/appstore/application-lifecycle-management/uploaded-new-version-32.PNG) - - ![查看新版本](/images/docs/zh-cn/appstore/application-lifecycle-management/see-new-version-33.PNG) ### 步骤七:升级 @@ -238,16 +185,10 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 1. 
以 `project-regular` 身份登录 KubeSphere,搜寻到项目的**应用**页面,点击要升级的应用。 - ![待升级应用](/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-upgraded-34.PNG) - -2. 点击**更多操作**,在下拉菜单中选择**编辑模板**。 - - ![编辑模板](/images/docs/zh-cn/appstore/application-lifecycle-management/edit-template-35.PNG) +2. 点击**更多操作**,在下拉菜单中选择**编辑设置**。 3. 在弹出窗口中,您可以查看应用配置 YAML 文件。在右侧的下拉列表中选择新版本,您可以自定义新版本的 YAML 文件。在本教程中,点击**更新**,直接使用默认配置。 - ![升级应用](/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-an-app-36.PNG) - {{< notice note >}} 您可以在右侧的下拉列表中选择与左侧相同的版本,通过 YAML 文件自定义当前应用的配置。 @@ -256,9 +197,6 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 4. 在**应用**页面,您会看到应用正在升级中。升级完成后,应用状态会变成**运行中**。 - ![版本升级](/images/docs/zh-cn/appstore/application-lifecycle-management/version-upgraded-37.PNG) - - ![升级完成](/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-finish-38.PNG) ### 步骤八:下架应用程序 @@ -266,12 +204,8 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 1. 以 `reviewer` 身份登录 KubeSphere。点击左上角的**平台管理**,选择**应用商店管理**。在**应用商店**页面,点击 Redis。 - ![下架应用](/images/docs/zh-cn/appstore/application-lifecycle-management/remove-app-39.PNG) - 2. 在详情页面,点击**下架应用**,在弹出的对话框中选择**确定**,确认将应用从应用商店下架的操作。 - ![应用下架](/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-app-40.PNG) - {{< notice note >}} 将应用从应用商店下架不影响正在使用该应用的租户。 @@ -280,12 +214,8 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 3. 要让应用再次在应用商店可用,点击**上架应用**。 - ![上架应用](/images/docs/zh-cn/appstore/application-lifecycle-management/activate-app-41.PNG) - 4. 要下架应用的特定版本,展开版本菜单,点击**下架版本**。在弹出的对话框中,点击**确定**以确认操作。 - ![下架版本](/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-version-42.PNG) - {{< notice note >}} 下架应用版本后,该版本在应用商店将不可用。下架应用版本不影响正在使用该版本的租户。 @@ -294,8 +224,6 @@ KubeSphere 集成了 [OpenPitrix](https://github.com/openpitrix/openpitrix)( 5. 
要让应用版本再次在应用商店可用,点击**上架版本**。 - ![上架版本](/images/docs/zh-cn/appstore/application-lifecycle-management/activate-version-43.PNG) - diff --git a/content/zh/docs/application-store/built-in-apps/chaos-mesh-app.md b/content/zh/docs/application-store/built-in-apps/chaos-mesh-app.md new file mode 100644 index 000000000..0377f6505 --- /dev/null +++ b/content/zh/docs/application-store/built-in-apps/chaos-mesh-app.md @@ -0,0 +1,93 @@ +--- +title: "在 KubeSphere 中部署 Chaos Mesh" +keywords: 'KubeSphere, Kubernetes, Chaos Mesh, Chaos Engineering' +description: '了解如何在 KubeSphere 中部署 Chaos Mesh 并进行混沌实验。' +linkTitle: "部署 Chaos Mesh" +--- + +[Chaos Mesh](https://github.com/chaos-mesh/chaos-mesh) 是一个开源的云原生混沌工程平台,提供丰富的故障模拟类型,具有强大的故障场景编排能力,方便用户在开发测试中以及生产环境中模拟现实世界中可能出现的各类异常,帮助用户发现系统潜在的问题。 + +![Chaos Mesh architecture](/images/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-architecture-v2.png) + +本教程演示了如何在 KubeSphere 上部署 Chaos Mesh 进行混沌实验。 + +## **准备工作** + +* 部署 [KubeSphere 应用商店](https://kubesphere.io/zh/docs/pluggable-components/app-store/) +* 您需要为本教程创建一个企业空间、一个项目和两个帐户(ws-admin 和 project-regular)。帐户 ws-admin 必须在企业空间中被赋予 workspace-admin 角色,帐户 project-regular 必须被邀请至项目中赋予 operator 角色。若还未创建好,请参考[创建企业空间、项目、用户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 + + +## **开始混沌实验** + +### 步骤 1: 部署 Chaos Mesh + +1. 使用 `project-regular` 身份登录,在应用商店中搜索 `chaos-mesh`,点击搜索结果进入应用。 + + ![Chaos Mesh app](/images/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png) + + +2. 进入应用信息页后,点击右上角**安装**按钮。 + + ![Install Chaos Mesh](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png) + +3. 进入应用设置页面,可以设置应用**名称**(默认会随机一个唯一的名称)和选择安装的**位置**(对应的 Namespace) 和**版本**,然后点击右上角**下一步**。 + + ![Chaos Mesh basic information](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png) + +4. 
根据实际需要编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 + + ![Chaos Mesh configurations](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png) + +5. 等待 Chaos Mesh 开始正常运行。 + + ![Chaos Mesh deployed](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png) + +6. 访问**应用负载**,可以看到 Chaos Mesh 创建的三个部署。 + + ![Chaos Mesh deployments](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployments.png) + +### 步骤 2: 访问 Chaos Mesh + +1. 前往**应用负载**下服务页面,复制 chaos-dashboard 的 **NodePort**。 + + ![Chaos Mesh NodePort](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png) + +2. 您可以通过 `${NodeIP}:${NODEPORT}` 方式访问 Chaos Dashboard。并参考[管理用户权限](https://chaos-mesh.org/zh/docs/manage-user-permissions/)文档,生成 Token,并登录 Chaos Dashboard。 + + ![Login to Chaos Dashboard](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png) + +### 步骤 3: 创建混沌实验 + +1. 在开始混沌实验之前,需要先确定并部署您的实验目标,比如,测试某应用在网络延时下的工作状态。本文使用了一个 demo 应用 `web-show` 作为待测试目标,观测系统网络延迟。 你可以使用下面命令部署一个 Demo 应用 `web-show` : + + ```bash + curl -sSL https://mirrors.chaos-mesh.org/latest/web-show/deploy.sh | bash + ``` + + {{< notice note >}} + + web-show 应用页面上可以直接观察到自身到 kube-system 命名空间下 Pod 的网络延迟。 + + {{}} + +2. 访问 **web-show** 应用程序。从您的网络浏览器,进入 `${NodeIP}:8081`。 + + ![Chaos Mesh web show app](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png) + +3. 登录 Chaos Dashboard 创建混沌实验,为了更好地观察混沌实验效果,这里只创建一个独立的混沌实验,混沌实验的类型选择**网络攻击**,模拟网络延迟的场景: + + ![Chaos Dashboard](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png) + + 实验范围设置为 web-show 应用: + + ![Chaos Experiment scope](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png) + +4. 提交混沌实验后,查看实验状态: + + ![Chaos Experiment status](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/experiment-status.png) + +5. 
访问 web-show 应用观察实验结果 : + + ![Chaos Experiment result](/images/zh-cn/docs/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png) + +更多详情参考 [Chaos Mesh 使用文档](https://chaos-mesh.org/zh/docs/)。 \ No newline at end of file diff --git a/content/zh/docs/application-store/built-in-apps/etcd-app.md b/content/zh/docs/application-store/built-in-apps/etcd-app.md index 48e57584d..25ba1ae7c 100644 --- a/content/zh/docs/application-store/built-in-apps/etcd-app.md +++ b/content/zh/docs/application-store/built-in-apps/etcd-app.md @@ -13,7 +13,7 @@ weight: 14210 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,31 +21,20 @@ weight: 14210 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![项目概览](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/project-overview-1.PNG) - -2. 找到 etcd,点击**应用信息**页面上的**部署**。 - - ![应用商店 etcd](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-app-store-2.PNG) - - ![部署 etcd](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deploy-etcd-3.PNG) +2. 找到 etcd,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 etcd 部署在 `demo-project` 中,点击**下一步**。 - ![部署位置](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deployment-location-4.PNG) - -4. 在**应用配置**页面,指定 etcd 的持久化存储卷大小,点击**部署**。 - - ![指定存储卷](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/specify-volume-5.PNG) +4. 
在**应用设置**页面,指定 etcd 的持久化存储卷大小,点击**安装**。 {{< notice note >}} - 要指定 etcd 的更多值,请使用右上角的拨动开关查看 YAML 格式的应用清单文件,并编辑其配置。 + 要指定 etcd 的更多值,请使用右上角的**编辑YAML**查看 YAML 格式的应用清单文件,并编辑其配置。 {{}} -5. 在**应用**页面的**应用模板**选项卡下,稍等片刻待 etcd 启动并运行。 +5. 在**应用**页面的**基于模板的应用**选项卡下,稍等片刻待 etcd 启动并运行。 - ![etcd 运行中](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-running-6.PNG) ### 步骤 2:访问 etcd 服务 @@ -53,12 +42,8 @@ weight: 14210 1. 在**工作负载**的**有状态副本集**选项卡中,点击 etcd 的服务名称。 - ![etcd 有状态副本集](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-statefulset-7.PNG) - 2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 - ![etcd 终端](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-terminal-8.PNG) - 3. 在终端中,您可以直接读写数据。例如,分别执行以下两个命令。 ```bash @@ -69,8 +54,6 @@ weight: 14210 etcdctl get /name ``` - ![etcd 命令](/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-command-9.PNG) - 4. KubeSphere 集群内的客户端可以通过 `..svc.:2379`(例如本教程中是 `etcd-rscvf6.demo-project.svc.cluster.local:2379`) 访问 etcd 服务。 5. 有关更多信息,请参见 [etcd 官方文档](https://etcd.io/docs/v3.4.0/)。 diff --git a/content/zh/docs/application-store/built-in-apps/harbor-app.md b/content/zh/docs/application-store/built-in-apps/harbor-app.md index b803604ee..578bbe2d3 100644 --- a/content/zh/docs/application-store/built-in-apps/harbor-app.md +++ b/content/zh/docs/application-store/built-in-apps/harbor-app.md @@ -12,7 +12,7 @@ weight: 14220 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 
## 动手实验 @@ -20,18 +20,10 @@ weight: 14220 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/app-store-1.PNG) - -2. 找到 Harbor,点击**应用信息**页面上的**部署**。 - - ![寻找 Harbor](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/find-harbor-2.PNG) - - ![点击部署](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/click-deploy-3.PNG) +2. 找到 Harbor,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 Harbor 部署在 `demo-project` 中,点击**下一步**。 - ![部署 Harbor](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/deploy-harbor-4.PNG) - 4. 在**应用配置**页面,编辑 Harbor 的配置文件,请注意以下字段。 `type`:访问 Harbor 服务的方式。本示例使用 `nodePort`。 @@ -40,8 +32,6 @@ weight: 14220 `externalURL`:暴露给租户的 URL。 - ![配置 Harbor](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/harbor-config-5.PNG) - {{< notice note >}} - 请指定 `externalURL`,如果您访问 Harbor 有问题,该字段会对解决问题非常有用。 @@ -50,11 +40,10 @@ weight: 14220 {{}} - 配置编辑完成后,点击**部署**继续。 + 配置编辑完成后,点击**安装**继续。 5. 稍等片刻待 Harbor 启动并运行。 - ![创建 Harbor](/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/creating-harbor-6.PNG) ### 步骤 2:访问 Harbor diff --git a/content/zh/docs/application-store/built-in-apps/memcached-app.md b/content/zh/docs/application-store/built-in-apps/memcached-app.md index b4482507a..7e1ab5d4d 100644 --- a/content/zh/docs/application-store/built-in-apps/memcached-app.md +++ b/content/zh/docs/application-store/built-in-apps/memcached-app.md @@ -12,7 +12,7 @@ weight: 14230 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 
`demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -20,35 +20,20 @@ weight: 14230 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/in-app-store-1.PNG) - -2. 找到 Memcached,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 Memcached](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-app-store-2.PNG) - - ![部署 Memcached](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deploying-memcached-3.PNG) +2. 找到 Memcached,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 Memcached 部署在 `demo-project` 中,点击**下一步**。 - ![部署确认](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deployment-confirm-4.PNG) - -4. 在**应用配置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**部署**继续。 - - ![编辑配置](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/edit-config-5.PNG) +4. 在**应用配置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**安装**继续。 5. 稍等片刻待 Memcached 启动并运行。 - ![Memcached 运行中](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-running-6.PNG) ### 步骤 2:访问 Memcached 1. 在**服务**页面点击 Memcached 的服务名称。 - ![Memcached 服务](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-service-7.PNG) - -2. 在详情页面,您可以分别在**服务端口**和**容器组**下找到端口号和 Pod IP。 - - ![Memcached 端口和 Pod](/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-port-pod-8.PNG) +2. 在详情页面,您可以分别在**端口**和**容器组**下找到端口号和 Pod IP。 3. 
Memcached 服务是 Headless 服务,因此在集群内通过 Pod IP 和端口号访问它。Memcached `telnet` 命令的基本语法是 `telnet HOST PORT`。例如: diff --git a/content/zh/docs/application-store/built-in-apps/minio-app.md b/content/zh/docs/application-store/built-in-apps/minio-app.md index 7ae1ad3de..3c299f4c5 100644 --- a/content/zh/docs/application-store/built-in-apps/minio-app.md +++ b/content/zh/docs/application-store/built-in-apps/minio-app.md @@ -12,7 +12,7 @@ weight: 14240 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -20,25 +20,14 @@ weight: 14240 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![Minio 应用](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-app-1.PNG) - -2. 找到 MinIO,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 Minio](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-app-store-2.PNG) - - ![deploy-minio](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-3.PNG) +2. 找到 MinIO,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 MinIO 部署在 `demo-project` 中,点击**下一步**。 - ![部署 Minio](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-deploy-4.PNG) - -4. 在**应用配置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**部署**继续。 - - ![部署 Minio 2](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-5.PNG) +4. 在**应用配置**页面,您可以使用默认配置或者直接编辑 YAML 文件来自定义配置。点击**安装**继续。 5. 
稍等片刻待 MinIO 启动并运行。 - ![列表中的 Minio](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-list-6.PNG) ### 步骤 2:访问 MinIO Browser @@ -46,26 +35,14 @@ weight: 14240 1. 在**服务**页面点击 MinIO 的服务名称。 - ![Minio 详情](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-detail-7.PNG) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. 在**访问模式**的下拉列表中选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![编辑外网访问](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/edit-internet-access-8.PNG) - -3. 在**访问方式**的下拉列表中选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![nodeport](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/nodeport-9.PNG) - -4. 您可以在**服务端口**中查看已暴露的端口。 - - ![已暴露的端口](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/port-exposed-10.PNG) +4. 您可以在**端口**中查看已暴露的端口。 5. 要访问 MinIO Browser,您需要 `accessKey` 和 `secretKey`,都在 MinIO 配置文件中指定。在**应用**的**应用模板**选项卡中,点击 MinIO,随后可以在**配置文件**选项卡下查找这两个字段的值。 - ![模板列表](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/template-list-11.PNG) - - ![配置文件](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/config-file-12.PNG) - 6. 
通过 `:` 使用 `accessKey` 和 `secretKey` 访问 MinIO Browser。 ![Minio Browser](/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-browser-13.PNG) diff --git a/content/zh/docs/application-store/built-in-apps/mongodb-app.md b/content/zh/docs/application-store/built-in-apps/mongodb-app.md index d7e744fca..5569cd13f 100644 --- a/content/zh/docs/application-store/built-in-apps/mongodb-app.md +++ b/content/zh/docs/application-store/built-in-apps/mongodb-app.md @@ -13,7 +13,7 @@ weight: 14250 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,42 +21,27 @@ weight: 14250 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/app-store-1.PNG) - -2. 找到 MongoDB,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 Mongodb](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-in-app-store-2.PNG) - - ![部署 Mongodb](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/deploy-mongodb-3.PNG) +2. 找到 MongoDB,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 MongoDB 部署在 `demo-project` 中,点击**下一步**。 - ![确认部署](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/confirm-deployment-4.PNG) - -4. 在**应用配置**页面,为该应用指定持久化存储卷,并记录用户名和密码用于访问该应用。操作完成后,点击**部署**。 - - ![设置应用配置](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/set-app-configuration-5.PNG) +4. 
在**应用配置**页面,为该应用指定持久化存储卷,并记录用户名和密码用于访问该应用。操作完成后,点击**安装**。 {{< notice note >}} - 要为 MongoDB 指定更多值,请打开右上角的拨动开关查看 YAML 格式的应用清单文件,编辑其配置。 + 要为 MongoDB 指定更多值,请打开右上角的**编辑YAML**开关查看 YAML 格式的应用清单文件,编辑其配置。 {{}} 5. 稍等片刻待 MongoDB 启动并运行。 - ![Mongodb 运行中](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-running-6.PNG) ### 步骤 2:访问 MongoDB 终端 1. 转到**服务**页面,点击 MongoDB 的服务名称。 - ![Mongodb 服务](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-7.PNG) - 2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 - ![Mongodb 终端](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-terminal-8.PNG) - 3. 在弹出窗口中,直接向终端输入命令使用该应用。 ![Mongodb 服务终端](/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-terminal-9.PNG) diff --git a/content/zh/docs/application-store/built-in-apps/mysql-app.md b/content/zh/docs/application-store/built-in-apps/mysql-app.md index eaee3f11e..16042ca1a 100644 --- a/content/zh/docs/application-store/built-in-apps/mysql-app.md +++ b/content/zh/docs/application-store/built-in-apps/mysql-app.md @@ -13,7 +13,7 @@ weight: 14260 ## 准备工作 - 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该帐户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,39 +21,23 @@ weight: 14260 1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 - ![go-to-app-store](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/go-to-app-store.png) - -2. 
找到 MySQL,在**应用信息**页面点击**部署**。 - - ![find-mysql](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/find-mysql.png) - - ![click-deploy](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/click-deploy.png) +2. 找到 MySQL,在**应用信息**页面点击**安装**。 3. 设置应用名称和版本,确保 MySQL 部署在 `demo-project` 项目中,然后点击**下一步**。 - ![deploy-mysql](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/deploy-mysql.png) - -4. 在**应用配置**页面,取消对 `mysqlRootPassword` 字段的注释并设置密码,然后点击**部署**。 - - ![uncomment-password](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/uncomment-password.png) +4. 在**应用配置**页面,取消对 `mysqlRootPassword` 字段的注释并设置密码,然后点击**安装**。 5. 等待 MySQL 创建完成并开始运行。 - ![mysql-running](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-running.png) ### 步骤 2:访问 MySQL 终端 1. 打开**工作负载**页面并点击 MySQL 的工作负载名称。 - ![mysql-workload](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-workload.png) - 2. 在**容器组**区域,展开容器详情,点击终端图标。 - ![mysql-terminal](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-terminal.png) - 3. 在终端窗口中,执行 `mysql -uroot -ptesting` 命令以 `root` 用户登录 MySQL。 - ![log-in-mysql](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/log-in-mysql.png) ### 步骤 3:从集群外访问 MySQL 数据库 @@ -61,19 +45,11 @@ weight: 14260 1. 打开**服务**页面并点击 MySQL 的服务名称。 - ![mysql-service](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-service.png) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![edit-internet-access](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/edit-internet-access.png) - -3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![nodeport-mysql](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/nodeport-mysql.png) - -4. 您可以在**服务端口**区域查看暴露的端口。该端口号和公网 IP 地址将在下一步用于访问 MySQL 数据库。 - - ![mysql-port-number](/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-port-number.png) +4. 
您可以在**端口**区域查看暴露的端口。该端口号和公网 IP 地址将在下一步用于访问 MySQL 数据库。 5. 您需要使用 MySQL Client 或第三方应用(例如 SQLPro Studio)才能访问 MySQL 数据库。以下演示如何使用 SQLPro Studio 访问 MySQL 数据库。 diff --git a/content/zh/docs/application-store/built-in-apps/nginx-app.md b/content/zh/docs/application-store/built-in-apps/nginx-app.md index 4a206f093..6d65cd23e 100644 --- a/content/zh/docs/application-store/built-in-apps/nginx-app.md +++ b/content/zh/docs/application-store/built-in-apps/nginx-app.md @@ -13,7 +13,7 @@ weight: 14270 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,23 +21,11 @@ weight: 14270 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/app-store-1.PNG) - -2. 找到 NGINX,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 Nginx](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-in-app-store-2.PNG) - - ![部署 Nginx](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/deploy-nginx-3.PNG) +2. 找到 NGINX,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 NGINX 部署在 `demo-project` 中,点击**下一步**。 - ![确认部署](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/confirm-deployment-4.PNG) - -4. 在**应用配置**页面,指定要为该应用部署的副本数量,根据需要启用应用路由 (Ingress)。操作完成后,点击**部署**。 - - ![编辑 Nginx 配置](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-config-nginx-5.PNG) - - ![清单文件](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/manifest-file-6.PNG) +4. 
在**应用配置**页面,指定要为该应用部署的副本数量,根据需要启用应用路由 (Ingress)。操作完成后,点击**安装**。 {{< notice note >}} @@ -47,7 +35,6 @@ weight: 14270 5. 稍等片刻待 NGINX 启动并运行。 - ![Nginx 运行中](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-running-7.PNG) ### 步骤 2:访问 NGINX @@ -55,19 +42,11 @@ weight: 14270 1. 转到**服务**页面,点击 NGINX 的服务名称。 - ![Nginx 服务](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-service-8.PNG) +2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. **访问模式**选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![编辑外网访问](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-internet-access-9.PNG) - -3. **访问方式**选择 **NodePort**,然后点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![nodeport](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nodeport-10.PNG) - -4. 在**服务端口**下,您可以查看已暴露的端口。 - - ![暴露端口](/images/docs/zh-cn/appstore/built-in-apps/nginx-app/exposed-port-11.PNG) +4. 在**端口**下,您可以查看已暴露的端口。 5. 
通过 `:` 访问 NGINX。 diff --git a/content/zh/docs/application-store/built-in-apps/postgresql-app.md b/content/zh/docs/application-store/built-in-apps/postgresql-app.md index d7eaa0fd3..e754d9d11 100644 --- a/content/zh/docs/application-store/built-in-apps/postgresql-app.md +++ b/content/zh/docs/application-store/built-in-apps/postgresql-app.md @@ -13,7 +13,7 @@ weight: 14280 ## 准备工作 - 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该帐户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,31 +21,20 @@ weight: 14280 1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 - ![click-app-store](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/click-app-store.png) - -2. 找到 PostgreSQL,在**应用信息**页面点击**部署**。 - - ![postgresql-in-app-store](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png) - - ![deploy-postgresql](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql.png) +2. 找到 PostgreSQL,在**应用信息**页面点击**安装**。 3. 设置应用名称和版本,确保 PostgreSQL 部署在 `demo-project` 项目中,然后点击**下一步**。 - ![deploy-postgresql-2](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png) - -4. 在**应用配置**页面,为应用设置持久卷,记录用户名和密码用于后续访问应用,然后点击**部署**。 - - ![set-config](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/set-config.png) +4. 在**应用配置**页面,为应用设置持久卷,记录用户名和密码用于后续访问应用,然后点击**安装**。 {{< notice note >}} - 如需为 PostgreSQL 设置更多的参数,可点击 **YAML** 后的切换开关打开应用的 YAML 清单文件,并在清单文件中设置相关参数。 + 如需为 PostgreSQL 设置更多的参数,可点击 **编辑YAML** 开关打开应用的 YAML 清单文件,并在清单文件中设置相关参数。 {{}} 5. 
等待 PostgreSQL 创建完成并开始运行。 - ![postgresql-ready](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-ready.png) ### 步骤 2:访问 PostgreSQL 数据库 @@ -53,26 +42,14 @@ weight: 14280 1. 打开**服务**页面并点击 PostgreSQL 的服务名称。 - ![access-postgresql](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/access-postgresql.png) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![edit-internet-access](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/edit-internet-access.png) - -3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![nodeport](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/nodeport.png) - -4. 您可以在**服务端口**区域查看暴露的端口。该端口将在下一步中用于访问 PostgreSQL 数据库。 - - ![port-number](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/port-number.png) +4. 您可以在**端口**区域查看暴露的端口。该端口将在下一步中用于访问 PostgreSQL 数据库。 5. 
在**容器组**区域,展开容器详情,点击终端图标。在弹出的窗口中直接输入命令访问数据库。 - ![container-terminal](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/container-terminal.png) - - ![postgresql-output](/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-output.png) - {{< notice note >}} 您也可以使用第三方应用例如 SQLPro Studio 连接数据库。取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 diff --git a/content/zh/docs/application-store/built-in-apps/rabbitmq-app.md b/content/zh/docs/application-store/built-in-apps/rabbitmq-app.md index fde548e51..8db8aa3a2 100644 --- a/content/zh/docs/application-store/built-in-apps/rabbitmq-app.md +++ b/content/zh/docs/application-store/built-in-apps/rabbitmq-app.md @@ -13,7 +13,7 @@ weight: 14290 ## 准备工作 - 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户。该帐户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,33 +21,20 @@ weight: 14290 1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 - ![rabbitmq01](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png) - -2. 找到 RabbitMQ,在**应用信息**页面点击**部署**。 - - ![find-rabbitmq](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png) - - ![click-deploy](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png) +2. 找到 RabbitMQ,在**应用信息**页面点击**安装**。 3. 设置应用名称和版本,确保 RabbitMQ 部署在 `demo-project` 项目中,然后点击**下一步**。 - ![rabbitmq03](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png) - -4. 
在**应用配置**页面,您可以直接使用默认配置,也可以通过修改表单参数或编辑 YAML 文件自定义配置。您需要记录 **Root Username** 和 **Root Password** 的值,用于在后续步骤中登录系统。设置完成后点击**部署**。 - - ![rabbitMQ11](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png) - - ![rabbitMQ04](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png) +4. 在**应用配置**页面,您可以直接使用默认配置,也可以通过修改表单参数或编辑 YAML 文件自定义配置。您需要记录 **Root Username** 和 **Root Password** 的值,用于在后续步骤中登录系统。设置完成后点击**安装**。 {{< notice tip >}} - 如需查看清单文件,请点击 **YAML** 开关。 + 如需查看清单文件,请点击 **编辑YAML** 开关。 {{}} 5. 等待 RabbitMQ 创建完成并开始运行。 - ![check-if-rabbitmq-is-running](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png) ### 步骤 2:访问 RabbitMQ 主页 @@ -55,19 +42,11 @@ weight: 14290 1. 打开**服务**页面并点击 RabbitMQ 的服务名称。 - ![go-to-services](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![rabbitmq07](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png) - -3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![rabbitmq08](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png) - -4. 您可以在**服务端口**区域查看暴露的端口。 - - ![rabbitmq09](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png) +4. 您可以在**端口**区域查看暴露的端口。 5. 
用 `:` 地址以及步骤 1 中记录的用户名和密码访问 RabbitMQ 的 **management** 端口。 ![rabbitmq-dashboard](/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.png) diff --git a/content/zh/docs/application-store/built-in-apps/radondb-mysql-app.md b/content/zh/docs/application-store/built-in-apps/radondb-mysql-app.md index ec67d8295..5492b0d7d 100644 --- a/content/zh/docs/application-store/built-in-apps/radondb-mysql-app.md +++ b/content/zh/docs/application-store/built-in-apps/radondb-mysql-app.md @@ -13,7 +13,7 @@ weight: 14293 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,34 +21,21 @@ weight: 14293 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 -2. 找到 RadonDB MySQL,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 RadonDB MySQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png) - - ![部署 RadonDB MySQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png) +2. 找到 RadonDB MySQL,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 RadonDB MySQL 部署在 `demo-project` 中,点击**下一步**。 - ![确认部署](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png) - -4. 在**应用配置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**部署**继续。 - - ![设置应用配置](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png) +4. 在**应用配置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**安装**继续。 5. 
稍等片刻待 RadonDB MySQL 启动并运行。 - ![RadonDB MySQL 运行中](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png) ### 步骤 2:访问 RadonDB MySQL 1. 进入**应用负载**下的**服务**页面,点击 RadonDB MySQL 服务名称。 - ![RadonDB MySQL 服务](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png) - 2. 在**容器组**下,展开菜单查看容器详情,然后点击**终端**图标。 - ![RadonDB MySQL 终端](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png) - 3. 在弹出窗口中,直接向终端输入命令使用该应用。 ![访问 RadonDB MySQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service-terminal.png) diff --git a/content/zh/docs/application-store/built-in-apps/radondb-postgresql-app.md b/content/zh/docs/application-store/built-in-apps/radondb-postgresql-app.md index 44979302d..a96167f4b 100644 --- a/content/zh/docs/application-store/built-in-apps/radondb-postgresql-app.md +++ b/content/zh/docs/application-store/built-in-apps/radondb-postgresql-app.md @@ -13,7 +13,7 @@ weight: 14294 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,49 +21,29 @@ weight: 14294 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店中的 RadonDB PostgreSQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png) - -2. 
找到 RadonDB PostgreSQL,点击**应用信息**页面上的**部署**。 - - ![部署 RadonDB PostgreSQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png) +2. 找到 RadonDB PostgreSQL,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 RadonDB PostgreSQL 部署在 `demo-project` 中,点击**下一步**。 - ![确认部署](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png) - -4. 在**应用配置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**部署**继续。 - - ![设置应用配置](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png) +4. 在**应用配置**页面,您可以使用默认配置,或者编辑 YAML 文件以自定义配置。点击**安装**继续。 5. 稍等片刻待 RadonDB PostgreSQL 启动并运行。 - ![RadonDB PostgreSQL 运行中](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png) ### 步骤 2:查看 PostgreSQL 集群状态 1. 在 `demo-project` 项目的**概览**页面,可查看当前项目资源使用情况。 - ![project-overview](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/project-overview.png) - 2. 进入**应用负载**下的**工作负载**页面,点击**有状态副本集**,查看集群状态。 - ![statefulsets-running](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png) - 进入一个有状态副本集群详情页面,点击**监控**标签页,可查看一定时间范围内的集群指标。 - ![statefulset-monitoring](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png) - 3. 进入**应用负载**下的**容器组**页面,可查看所有状态的容器。 - ![pods-running](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/pods-running.png) - -4. 进入**存储管理**下的**存储卷**页面,可查看存储卷,所有组件均使用了持久化存储。 - - ![volumes](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volumes.png) +4. 进入**存储**下的**存储卷**页面,可查看存储卷,所有组件均使用了持久化存储。 查看某个存储卷用量信息,以其中一个数据节点为例,可以看到当前存储的存储容量和剩余容量等监控数据。 - ![volume-status](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volume-status.png) ### 步骤 3:访问 RadonDB PostgreSQL @@ -71,14 +51,10 @@ weight: 14294 2. 在**资源状态**页面,点击**终端**图标。 - ![RadonDB PostgreSQL 终端](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png) - 3. 
在弹出窗口中,向终端输入命令使用该应用。 ```bash psql -h -p 5432 -U postgres -d postgres ``` - ![访问 RadonDB PostgreSQL](/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service-terminal.png) - 4. 如果您想从集群外部访问 RadonDB PostgreSQL,详细信息请参见 [RadonDB PostgreSQL 开源项目](https://github.com/radondb/radondb-postgresql-kubernetes)。 diff --git a/content/zh/docs/application-store/built-in-apps/redis-app.md b/content/zh/docs/application-store/built-in-apps/redis-app.md index fd21ddc7e..56bde2db7 100644 --- a/content/zh/docs/application-store/built-in-apps/redis-app.md +++ b/content/zh/docs/application-store/built-in-apps/redis-app.md @@ -13,7 +13,7 @@ weight: 14291 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,21 +21,11 @@ weight: 14291 1. 在 `demo-project` 项目的**概览**页面,点击左上角的**应用商店**。 - ![应用商店](/images/docs/zh-cn/appstore/built-in-apps/redis-app/app-store-1.PNG) - -2. 找到 Redis,点击**应用信息**页面上的**部署**。 - - ![应用商店中的 Redis](/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-in-app-store-2.PNG) - - ![部署 Redis](/images/docs/zh-cn/appstore/built-in-apps/redis-app/deploy-redis-3.PNG) +2. 找到 Redis,点击**应用信息**页面上的**安装**。 3. 设置名称并选择应用版本。请确保将 Redis 部署在 `demo-project` 中,点击**下一步**。 - ![确认部署](/images/docs/zh-cn/appstore/built-in-apps/redis-app/confirm-deployment-4.PNG) - -4. 在**应用配置**页面,为应用指定持久化存储卷和密码。操作完成后,点击**部署**。 - - ![配置 Redis](/images/docs/zh-cn/appstore/built-in-apps/redis-app/config-redis-5.PNG) +4. 
在**应用配置**页面,为应用指定持久化存储卷和密码。操作完成后,点击**安装**。 {{< notice note >}} @@ -45,20 +35,13 @@ weight: 14291 5. 稍等片刻待 Redis 启动并运行。 - ![redis 运行中](/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-running-6.PNG) ### 步骤 2:访问 Redis 终端 1. 转到**服务**页面,点击 Redis 的服务名称。 - ![访问 Redis](/images/docs/zh-cn/appstore/built-in-apps/redis-app/access-redis-7.PNG) - 2. 在**容器组**中展开菜单查看容器详情,随后点击**终端**图标。 - ![Redis 终端](/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-terminal-8.PNG) - 3. 在弹出窗口的终端中运行 `redis-cli` 命令来使用该应用。 - ![使用 Redis](/images/docs/zh-cn/appstore/built-in-apps/redis-app/use-redis-9.PNG) - 4. 有关更多信息,请参见 [Redis 官方文档](https://redis.io/documentation)。 diff --git a/content/zh/docs/application-store/built-in-apps/tomcat-app.md b/content/zh/docs/application-store/built-in-apps/tomcat-app.md index 4babb7505..ffbb8da5e 100644 --- a/content/zh/docs/application-store/built-in-apps/tomcat-app.md +++ b/content/zh/docs/application-store/built-in-apps/tomcat-app.md @@ -13,7 +13,7 @@ weight: 14292 ## 准备工作 - 您需要[启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该帐户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`)。该用户必须是已邀请至项目的平台普通用户,并且在项目中的角色为 `operator`。在本教程中,您需要以 `project-regular` 用户登录,并在 `demo-workspace` 企业空间的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -21,39 +21,23 @@ weight: 14292 1. 在 `demo-project` 的**概览**页面,点击左上角的**应用商店**。 - ![go-to-app-store](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-app01.png) - -2. 找到 Tomcat,在**应用信息**页面点击**部署**。 - - ![find-tomcat](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/find-tomcat.png) - - ![click-deploy](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-deploy.png) +2. 
找到 Tomcat,在**应用信息**页面点击**安装**。 3. 设置应用名称和版本,确保 Tomcat 部署在 `demo-project` 项目中,然后点击**下一步**。 - ![click-next](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-next.png) - -4. 在**应用配置**页面,您可以直接使用默认配置,也可以通过编辑 YAML 文件自定义配置。设置完成后点击**部署**。 - - ![deploy-tomcat](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/deploy-tomcat.png) +4. 在**应用配置**页面,您可以直接使用默认配置,也可以通过编辑 YAML 文件自定义配置。设置完成后点击**安装**。 5. 等待 Tomcat 创建完成并开始运行。 - ![tomcat-running](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-running.png) ### 步骤 2:访问 Tomcat 终端 1. 打开**服务**页面并点击 Tomcat 的服务名称。 - ![click-tomcat-service](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-tomcat-service.png) - 2. 在**容器组**区域,展开容器详情,点击终端图标。 - ![tomcat-terminal-icon](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-terminal-icon.png) - 3. 在 `/usr/local/tomcat/webapps` 目录下查看部署的项目。 - ![view-project](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/view-project.png) ### 步骤 3:用浏览器访问 Tomcat 项目 @@ -61,19 +45,11 @@ weight: 14292 1. 打开**服务**页面并点击 Tomcat 的服务名称。 - ![click-tomcat-service](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-tomcat-service.png) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 -2. 点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 +3. 将**访问模式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![edit-internet-access](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/edit-internet-access.png) - -3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - - ![nodeport](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/nodeport.png) - -4. 您可以在**服务端口**区域查看暴露的端口。 - - ![exposed-port](/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/exposed-port.png) +4. 您可以在**端口**区域查看暴露的端口。 5. 
在浏览器中用 `:/sample` 地址访问 Tomcat 示例项目。 diff --git a/content/zh/docs/application-store/external-apps/deploy-clickhouse.md b/content/zh/docs/application-store/external-apps/deploy-clickhouse.md index 4dc0bc640..3d8537978 100644 --- a/content/zh/docs/application-store/external-apps/deploy-clickhouse.md +++ b/content/zh/docs/application-store/external-apps/deploy-clickhouse.md @@ -13,7 +13,7 @@ weight: 14340 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户帐户 (`project-regular`) 供本教程操作使用。该帐户需要是平台普通用户,并邀请至项目中赋予 `operator` 角色作为项目操作员。本教程中,请以 `project-regular` 身份登录控制台,在企业空间 `demo-workspace` 中的 `demo-project` 项目中进行操作。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 请确保 KubeSphere 项目网关已开启外网访问。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 ## 动手实验 @@ -23,7 +23,7 @@ weight: 14340 1. 
以 `admin` 身份登录 KubeSphere 的 Web 控制台,并使用**工具箱**中的 **Kubectl** 执行以下命令来安装 ClickHouse Operator。建议至少准备 2 个可用集群节点。 ```bash - kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml ``` {{< notice note >}} @@ -35,10 +35,12 @@ weight: 14340 **预期结果** ```powershell - customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.altinity.com configured - customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.altinity.com created - customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.altinity.com created + $ kubectl apply -f https://raw.githubusercontent.com/radondb/radondb-clickhouse-kubernetes/main/clickhouse-operator-install.yml + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallations.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseinstallationtemplates.clickhouse.radondb.com created + customresourcedefinition.apiextensions.k8s.io/clickhouseoperatorconfigurations.clickhouse.radondb.com created serviceaccount/clickhouse-operator created + clusterrole.rbac.authorization.k8s.io/clickhouse-operator-kube-system created clusterrolebinding.rbac.authorization.k8s.io/clickhouse-operator-kube-system created configmap/etc-clickhouse-operator-files created configmap/etc-clickhouse-operator-confd-files created @@ -52,67 +54,49 @@ weight: 14340 2. 
执行如下命令可查看 ClickHouse Operator 资源状态。 ```bash - kubectl get all --selector=app=clickhouse-operator -n kube-system + $ kubectl get all --selector=app=clickhouse-operator -n kube-system ``` **预期结果** ``` NAME READY STATUS RESTARTS AGE pod/clickhouse-operator-644fcb8759-9tfcx 2/2 Running 0 4m32s - + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/clickhouse-operator-metrics ClusterIP 10.96.72.49 8888/TCP 4m32s - + NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/clickhouse-operator 1/1 1 1 4m32s - + NAME DESIRED CURRENT READY AGE replicaset.apps/clickhouse-operator-644fcb8759 1 1 1 4m32s - + ``` ### 步骤 2:添加应用仓库 -1. 以 `ws-admin` 身份登录 KubeSphere 的 Web 控制台。在企业空间中,进入**应用管理**下的**应用仓库**页面,点击**添加仓库**。 - - ![add-repo](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-repo.png) +1. 以 `ws-admin` 身份登录 KubeSphere 的 Web 控制台。在企业空间中,进入**应用管理**下的**应用仓库**页面,点击**添加**。 2. 在出现的对话框中,输入 `clickhouse` 作为应用仓库名称,输入 `https://radondb.github.io/radondb-clickhouse-kubernetes/` 作为仓库的 URL。点击**验证**以验证 URL。在 URL 旁边呈现一个绿色的对号,验证通过后,点击**确定**继续。 - ![add-clickhouse](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-clickhouse.png) - 3. 将仓库成功导入到 KubeSphere 之后,在列表中可查看 ClickHouse 仓库。 - ![repo-added](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/repo-added.png) ### 步骤 3:部署 ClickHouse 集群 -1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台。在 `demo-project` 项目中,进入**应用负载**下的**应用**页面,点击**部署新应用**。 +1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台。在 `demo-project` 项目中,进入**应用负载**下的**应用**页面,点击**创建**。 - ![click-deploy-new-app](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png) - -2. 在对话框中,选择**来自应用模板**。 - - ![from-app-templates](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/from-app-templates.png) +2. 在对话框中,选择**从应用模板**。 3. 从下拉菜单中选择 `clickhouse` 应用仓库 ,然后点击 **clickhouse-cluster**。 - ![clickhouse-cluster](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png) - -4. 
在**配置文件**选项卡,可以直接通过控制台查看配置信息,也可以通过下载默认 `values.yaml` 文件查看。在**版本**列框下,选择一个版本号,点击**部署**以继续。 +4. 在**Chart 文件**选项卡,可以直接通过控制台查看配置信息,也可以通过下载默认 `values.yaml` 文件查看。在**版本**列框下,选择一个版本号,点击**安装**以继续。 - ![chart-tab](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/chart-tab.png) - 5. 在**基本信息**页面,确认应用名称、应用版本以及部署位置。点击**下一步**以继续。 - ![basic-info](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/basic-info.png) - -6. 在**应用配置**页面,可以编辑 `values.yaml` 文件,也可以直接点击**部署**使用默认配置。 - - ![click-deploy](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy.png) +6. 在**应用配置**页面,可以编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 7. 等待 ClickHouse 集群正常运行。可在**工作负载**下的**应用**页面,查看部署的应用。 - ![app-running](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/app-running.png) ### 步骤 4:查看 ClickHouse 集群状态 @@ -120,27 +104,16 @@ weight: 14340 2. 进入**应用负载**下的**工作负载**页面,点击**有状态副本集**,查看集群状态。 - ![statefulsets-running](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulsets-running.png) - 进入一个有状态副本集群详情页面,点击**监控**标签页,可查看一定时间范围内的集群指标。 - ![statefulset-monitoring](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png) - 3. 进入**应用负载**下的**容器组**页面,可查看所有状态的容器。 - ![pods-running](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/pods-running.png) - -4. 进入**存储管理**下的**存储卷**页面,可查看存储卷,所有组件均使用了持久化存储。 - - ![volumes](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volumes.png) +4. 进入**存储**下的**存储卷**页面,可查看存储卷,所有组件均使用了持久化存储。 查看某个存储卷用量信息,以其中一个数据节点为例,可以看到当前存储的存储容量和剩余容量等监控数据。 - ![volume-status](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volume-status.png) - 5. 在项目**概览**页面,可查看当前项目资源使用情况。 - ![project-overview](/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/project-overview.png) ### 步骤 5:访问 ClickHouse 集群 @@ -149,7 +122,7 @@ weight: 14340 2. 打开终端窗口,执行如下命令,并输入 ClickHouse 集群用户名和密码。 ```bash - kubectl edit chi -n + $ kubectl edit chi -n ``` {{< notice note >}} @@ -163,7 +136,7 @@ weight: 14340 3. 
执行如下命令,访问 ClickHouse 集群,并可通过 `show databases` 命令查看数据库。 ```bash - kubectl exec -it -n -- clickhouse-client --user= --password= + $ kubectl exec -it -n -- clickhouse-client --user= --password= ``` {{< notice note >}} diff --git a/content/zh/docs/application-store/external-apps/deploy-gitlab.md b/content/zh/docs/application-store/external-apps/deploy-gitlab.md index bca2f3a94..d7cc2ddb6 100644 --- a/content/zh/docs/application-store/external-apps/deploy-gitlab.md +++ b/content/zh/docs/application-store/external-apps/deploy-gitlab.md @@ -13,47 +13,32 @@ weight: 14310 ## 准备工作 - 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 ### 步骤 1:添加应用仓库 -1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加仓库**。 - - ![add-repo](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-repo.png) +1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 2. 在出现的对话框中,输入 `main` 作为应用仓库名称,输入 `https://charts.kubesphere.io/main` 作为应用仓库 URL。点击**验证**来验证 URL,如果可用,则会在 URL 右侧看到一个绿色的对号。点击**确定**继续操作。 - ![add-main-repo](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-main-repo.png) - 3. 仓库成功导入到 KubeSphere 后,会显示在列表里。 - ![added-main-repo](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/added-main-repo.png) ### 步骤 2:部署 GitLab -1. 登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**部署新应用**。 +1. 
登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**创建**。 - ![deploy-app](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deploy-app.png) - -2. 在出现的对话框中,选择**来自应用模板**。 - - ![from-app-templates](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/from-app-templates.png) +2. 在出现的对话框中,选择**从应用模板**。 3. 从下拉菜单中选择 `main`,然后点击 **gitlab**。 - ![click-gitlab](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/click_gitlab.png) - -4. 在**应用信息**选项卡和**配置文件**选项卡,可以看到控制台的默认配置。点击**部署**继续。 - - ![view-config](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/view_config.png) +4. 在**应用信息**选项卡和**Chart 文件**选项卡,可以看到控制台的默认配置。点击**安装**继续。 5. 在**基本信息**页面,可以看到应用名称、应用版本以及部署位置。本教程使用 `4.2.3 [13.2.2]` 版本。点击**下一步**继续。 - ![basic-info](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/basic_info.png) - -6. 在**应用配置**页面,使用以下配置替换默认配置,然后点击**部署**。 +6. 在**应用设置**页面,使用以下配置替换默认配置,然后点击**安装**。 ```yaml global: @@ -67,8 +52,6 @@ weight: 14310 enabled: false ``` - ![change-value](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/change_value.png) - {{< notice note >}} `demo-project` 指的是部署 GitLab 的项目名称,请确保使用您自己的项目名称。 @@ -77,14 +60,8 @@ weight: 14310 7. 等待 GitLab 正常运行。 - ![gitlab-running](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/gitlab-running.png) - 8. 访问**工作负载**,可以看到为 GitLab 创建的所有部署和有状态副本集。 - ![deployments-running](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deployments-running.png) - - ![statefulsets-running](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/statefulsets-running.png) - {{< notice note >}} 可能需要过一段时间才能看到所有部署和有状态副本集正常运行。 @@ -93,13 +70,10 @@ weight: 14310 ### 步骤 3:获取 root 用户的密码 -1. 访问**配置中心**的密钥,在搜索栏输入 `gitlab-initial-root-password`,然后按下键盘上的**回车键**来搜索密钥。 - - ![search-secret](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-secret.png) +1. 选择**配置** > **保密字典**,在搜索栏输入 `gitlab-initial-root-password`,然后按下键盘上的**回车键**来搜索密钥。 2. 
点击密钥访问其详情页,然后点击右上角的 查看密码。请确保将密码进行复制。 - ![password](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/initial-password.png) ### 步骤 4:编辑 hosts 文件 @@ -128,8 +102,6 @@ weight: 14310 1. 访问**应用负载**下的**服务**,在搜索栏输入 `nginx-ingress-controller`,然后按下键盘上的**回车键**搜索该服务,可以看到通过端口 `31246` 暴露的服务,您可以使用该端口访问 GitLab。 - ![search-service](/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-service.png) - {{< notice note >}} 在不同控制台上显示的端口号可能不同,请您确保使用自己的端口号。 diff --git a/content/zh/docs/application-store/external-apps/deploy-metersphere.md b/content/zh/docs/application-store/external-apps/deploy-metersphere.md index eafe8c276..1f02202c4 100644 --- a/content/zh/docs/application-store/external-apps/deploy-metersphere.md +++ b/content/zh/docs/application-store/external-apps/deploy-metersphere.md @@ -13,60 +13,37 @@ MeterSphere 是一站式的开源企业级连续测试平台,涵盖测试跟 ## 准备工作 - 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要为本教程创建一个企业空间、一个项目以及两个帐户(`ws-admin` 和 `project-regular`)。在企业空间中,`ws-admin` 帐户必须被赋予 `workspace-admin` 角色,`project-regular` 帐户必须被赋予 `operator` 角色。如果还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## **动手实验** ### 步骤 1:添加应用仓库 -1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加仓库**。 - - ![add-repo](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-repo.png) +1. 以 `ws-admin` 身份登录 KubeSphere。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 2. 在出现的对话框中,输入 `metersphere` 作为应用仓库名称,输入 `https://charts.kubesphere.io/test` 作为应用仓库 URL。点击**验证**来验证 URL,如果可用,则会在 URL 右侧看到一个绿色的对号。点击**确定**继续操作。 - ![add-metersphere-repo](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-metersphere-repo.png) - 3. 
仓库成功导入到 KubeSphere 后,会显示在列表里。 - ![added-metersphere-repo](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/added-metersphere-repo.png) ### 步骤 2:部署 MeterSphere -1. 登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**部署新应用**。 +1. 登出 KubeSphere,再以 `project-regular` 登录。在您的项目中,访问**应用负载**下的**应用**,然后点击**创建**。 - ![deploy-app](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deploy-app.png) - -2. 在出现的对话框中,选择**来自应用模板**。 - - ![from-app-templates](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/from-app-templates.png) +2. 在出现的对话框中,选择**从应用模板**。 3. 从下拉菜单中选择 `metersphere`,然后点击 **metersphere-chart**。 - ![click-metersphere](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/click-metersphere.png) - -4. 在**应用信息**选项卡和**配置文件**选项卡,可以看到控制台的默认配置。点击**部署**继续。 - - ![view-config](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/view-config.png) +4. 在**应用信息**选项卡和**Chart 文件**选项卡,可以看到控制台的默认配置。点击**安装**继续。 5. 在**基本信息**页面,可以看到应用名称、应用版本以及部署位置。点击**下一步**继续。 - ![basic-info](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/basic-info.png) - -6. 在**应用配置**页面,将 `imageTag` 的值从 `master` 改为 `v1.6`,然后点击**部署**。 - - ![change-value](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/change-value.png) +6. 在**应用配置**页面,将 `imageTag` 的值从 `master` 改为 `v1.6`,然后点击**安装**。 7. 等待 MeterSphere 应用正常运行。 - ![metersphere-running](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-running.png) - 8. 访问**工作负载**,可以看到为 MeterSphere 创建的所有部署和有状态副本集。 - ![deployments-running](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deployments-running.png) - - ![statefulsets-running](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/statefulsets-running.png) - {{< notice note >}} 可能需要过一段时间才能看到所有部署和有状态副本集正常运行。 @@ -77,8 +54,6 @@ MeterSphere 是一站式的开源企业级连续测试平台,涵盖测试跟 1. 
访问**应用负载**下的**服务**,可以看到 MeterSphere 服务,其服务类型默认设置为 `NodePort`。 - ![metersphere-service](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-service.png) - 2. 您可以通过 `:` 使用默认帐户及密码 (`admin/metersphere`) 访问 MeterSphere。 ![login-metersphere](/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/login-metersphere.png) diff --git a/content/zh/docs/application-store/external-apps/deploy-tidb.md b/content/zh/docs/application-store/external-apps/deploy-tidb.md index f17ac77e9..e72627bfe 100644 --- a/content/zh/docs/application-store/external-apps/deploy-tidb.md +++ b/content/zh/docs/application-store/external-apps/deploy-tidb.md @@ -14,7 +14,7 @@ weight: 14320 - 您需要准备至少 3 个可调度的节点。 - 您需要启用 [OpenPitrix 系统](../../../pluggable-components/app-store/)。 -- 您需要为本教程创建一个企业空间、一个项目和两个帐户(`ws-admin` 和 `project-regular`)。帐户 `ws-admin` 必须在企业空间中被赋予 `workspace-admin` 角色,帐户 `project-regular` 必须被邀请至项目中赋予 `operator` 角色。若还未创建好,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要为本教程创建一个企业空间、一个项目和两个帐户(`ws-admin` 和 `project-regular`)。帐户 `ws-admin` 必须在企业空间中被赋予 `workspace-admin` 角色,帐户 `project-regular` 必须被邀请至项目中赋予 `operator` 角色。若还未创建好,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## **动手实验** @@ -40,80 +40,50 @@ weight: 14320 ### 步骤 2:添加应用仓库 -1. 登出 KubeSphere,再以 `ws-admin` 身份登录。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加仓库**。 - - ![add-repo](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.png) +1. 登出 KubeSphere,再以 `ws-admin` 身份登录。在企业空间中,访问**应用管理**下的**应用仓库**,然后点击**添加**。 2. 在出现的对话框中,输入 `pingcap` 作为应用仓库名称,输入 `https://charts.pingcap.org` 作为 PingCAP Helm 仓库的 URL。点击**验证**以验证 URL,如果可用,您将会在 URL 旁边看到一个绿色的对号。点击**确定**以继续。 - ![add-pingcap-repo](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.png) - 3. 
将仓库成功导入到 KubeSphere 之后,它将显示在列表中。 - ![added-pingcap-repo](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.png) ### 步骤 3:部署 TiDB Operator -1. 登出 KubeSphere,再以 `project-regular` 身份登录。在您的项目中,访问**应用负载**下的**应用**,点击**部署新应用**。 +1. 登出 KubeSphere,再以 `project-regular` 身份登录。在您的项目中,访问**应用负载**下的**应用**,点击**创建**。 - ![deploy-app](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.png) - -2. 在出现的对话框中,选择**来自应用模板**。 - - ![from-app-templates](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.png) +2. 在出现的对话框中,选择**从应用模板**。 3. 从下拉菜单中选择 `pingcap`,然后点击 **tidb-operator**。 - ![click-tidb-operator](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.png) - {{< notice note >}} 本教程仅演示如何部署 TiDB Operator 和 TiDB 集群。您也可以按需部署其他工具。 {{}} -4. 在**配置文件**选项卡,您可以直接从控制台查看配置,也可以通过点击右上角的图标以下载默认 `values.yaml` 文件。在**版本**下,从下拉菜单中选择一个版本号,点击**部署**。 - - ![select-version](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.png) +4. 在**Chart 文件**选项卡,您可以直接从控制台查看配置,也可以通过点击右上角的图标以下载默认 `values.yaml` 文件。在**版本**下,从下拉菜单中选择一个版本号,点击**安装**。 5. 在**基本信息**页面,确认应用名称、应用版本以及部署位置。点击**下一步**以继续。 - ![basic-info](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.png) - -6. 在**应用配置**页面,您可以编辑 `values.yaml` 文件,也可以直接点击**部署**使用默认配置。 - - ![check-config-file](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.png) +6. 在**应用配置**页面,您可以编辑 `values.yaml` 文件,也可以直接点击**安装**使用默认配置。 7. 等待 TiDB Operator 正常运行。 - ![tidb-operator-running](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.png) - 8. 访问**工作负载**,可以看到为 TiDB Operator 创建的两个部署。 - ![tidb-deployment](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.png) ### 步骤 4:部署 TiDB 集群 部署 TiDB 集群的过程与部署 TiDB Operator 的过程相似。 -1. 
访问**应用负载**下的**应用**,再次点击**部署新应用**,然后选择**来自应用模板**。 - - ![deploy-app-again](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.png) - - ![from-app-templates-2](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.png) +1. 访问**应用负载**下的**应用**,再次点击**创建**,然后选择**从应用模板**。 2. 在 PingCAP 仓库中,点击 **tidb-cluster**。 - ![click-tidb-cluster](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.png) - -3. 在**配置文件**选项卡,可以查看配置和下载 `values.yaml` 文件。点击**部署**以继续。 - - ![download-yaml-file](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.png) +3. 在**Chart 文件**选项卡,可以查看配置和下载 `values.yaml` 文件。点击**安装**以继续。 4. 在**基本信息**页面,确认应用名称、应用版本和部署位置。点击**下一步**以继续。 - ![tidb-cluster-info](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.png) - 5. 一些 TiDB 组件需要[持久卷](../../../cluster-administration/persistent-volume-and-storage-class/)。您可以运行以下命令查看存储类型。 ``` @@ -126,9 +96,7 @@ weight: 14320 csi-super-high-perf csi-qingcloud Delete Immediate true 71m ``` -6. 在**应用配置**页面,将所有 `storageClassName` 字段的默认值从 `local-storage` 更改为您的存储类型名称。例如,您可以根据以上输出将这些默认值更改为 `csi-standard`。 - - ![tidb-cluster-config](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.png) +6. 在**应用设置**页面,将所有 `storageClassName` 字段的默认值从 `local-storage` 更改为您的存储类型名称。例如,您可以根据以上输出将这些默认值更改为 `csi-standard`。 {{< notice note >}} @@ -136,20 +104,15 @@ weight: 14320 {{}} -7. 点击**部署**,然后就可以在列表中看到如下所示的两个应用: +7. 点击**安装**,然后就可以在列表中看到安装的应用。 - ![tidb-cluster-app-running](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.png) ### 步骤 5:查看 TiDB 集群状态 1. 访问**应用负载**下的**工作负载**,确认所有的 TiDB 集群部署都在正常运行。 - ![tidb-cluster-deployments-running](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.png) - 2. 
切换到**有状态副本集**选项卡,可以看到 TiDB、TiKV 和 PD 均正常运行。 - ![tidb-statefulsets](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.png) - {{< notice note >}} TiKV 和 TiDB 会自动创建,可能要过一段时间才能在列表中显示。 @@ -158,44 +121,21 @@ weight: 14320 3. 点击单个有状态副本集以访问其详情页。在**监控**选项卡下,可以看到一段时间内以折线图显示的指标。 - TiDB 指标: - - ![tidb-metrics](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.png) - - TiKV 指标: - - ![tikv-metrics](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.png) - - PD 指标: - - ![pd-metrics](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.png) - 4. 在**应用负载**下的**容器组**中,可以看到 TiDB 集群包含两个 TiDB Pod、三个 TiKV Pod 和三个 PD Pod。 - ![tidb-pod-list](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.png) +5. 在**存储**下的**存储卷**中,可以看到 TiKV 和 PD 都在使用持久卷。 -5. 在**存储管理**下的**存储卷**中,可以看到 TiKV 和 PD 都在使用持久卷。 - - ![tidb-storage-usage](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.png) - -6. 同时,也会监控存储卷的使用情况。点击一个存储卷以访问其详情页。以 TiKV 为例: - - ![tikv-volume-status](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.png) +6. 同时,也会监控存储卷的使用情况。点击一个存储卷以访问其详情页。 7. 在项目的**概览**页面,可以看到当前项目的资源使用情况列表。 - ![tidb-project-resource-usage](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.png) ### 步骤 6: 访问 TiDB 集群 1. 访问**应用负载**下的**服务**,可以看到所有服务的详细信息。由于服务类型默认设置为 `NodePort`,因此您可以通过集群外部的 Node IP 地址进行访问。 - ![tidb-service](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.png) - 3. 
TiDB 集成了 Prometheus 和 Grafana 以监控数据库集群的性能。例如,您可以通过 `:` 访问 Grafana 以查看指标。 - ![tidb-service-grafana](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.png) - ![tidb-grafana](/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.png) {{< notice note >}} diff --git a/content/zh/docs/cluster-administration/application-resources-monitoring.md b/content/zh/docs/cluster-administration/application-resources-monitoring.md index 9db696ecc..a8840c3e9 100644 --- a/content/zh/docs/cluster-administration/application-resources-monitoring.md +++ b/content/zh/docs/cluster-administration/application-resources-monitoring.md @@ -7,29 +7,23 @@ weight: 8300 --- -除了在物理资源级别监控数据外,集群管理员还需要密切跟踪整个平台上的应用资源,例如项目和 DevOps 工程的数量,以及特定类型的工作负载和服务的数量。**应用资源监控**提供了平台的资源使用情况和应用级趋势的汇总信息。 +除了在物理资源级别监控数据外,集群管理员还需要密切跟踪整个平台上的应用资源,例如项目和 DevOps 项目的数量,以及特定类型的工作负载和服务的数量。**应用资源**提供了平台的资源使用情况和应用级趋势的汇总信息。 ## 准备工作 -您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 ## 使用情况 1. 点击左上角的**平台管理**,然后选择**集群管理**。 - ![Platform](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/platform.png) -2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了 Member 集群,您可以选择一个集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 - ![Clusters Management](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/clusters-management.png) +2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 3. 在左侧导航栏选择**监控告警**下的**应用资源**以查看应用资源概览,包括集群中所有资源使用情况的汇总信息。 - - ![application-resources-monitoring1](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/application-resources-monitoring1.png) -4. **集群资源使用情况**和**应用资源用量**提供最近 7 天的监控数据,并支持自定义时间范围查询。 - ![Time Range](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/time-range.png) -5. 
点击特定资源以查看特定时间段内的使用详情和趋势,例如**集群资源使用情况**下的 **CPU**。在详情页面,您可以按项目查看特定的监控数据,以及自定义时间范围查看资源的确切使用情况。 - ![Cluster Resources Usage](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png) +4. **集群资源用量**和**应用资源用量**提供最近 7 天的监控数据,并支持自定义时间范围查询。 + +5. 点击特定资源以查看特定时间段内的使用详情和趋势,例如**集群资源用量**下的 **CPU**。在详情页面,您可以按项目查看特定的监控数据,以及自定义时间范围查看资源的确切使用情况。 ## 用量排行 -**用量排行**支持按照资源使用情况对项目进行排序,帮助平台管理员了解当前集群中每个项目的资源使用情况,包括 **CPU 使用量**、**内存使用量**、**容器组 (Pod) 数量**、**网络流出速率**和**网络流入速率**。您可以选择下拉列表中的任一指标对项目按升序或降序进行排序。此功能可以帮助您快速定位大量消耗 CPU 或内存资源的应用程序 (Pod)。 -![Usage Ranking](/images/docs/zh-cn/cluster-administration/application-resources-monitoring/usage-ranking.png) \ No newline at end of file +**用量排行**支持按照资源使用情况对项目进行排序,帮助平台管理员了解当前集群中每个项目的资源使用情况,包括 **CPU 用量**、**内存用量**、**容器组数量**、**网络流出速率**和**网络流入速率**。您可以选择下拉列表中的任一指标对项目按升序或降序进行排序。此功能可以帮助您快速定位大量消耗 CPU 或内存资源的应用程序(容器组)。 \ No newline at end of file diff --git a/content/zh/docs/cluster-administration/cluster-settings/cluster-gateway.md b/content/zh/docs/cluster-administration/cluster-settings/cluster-gateway.md new file mode 100644 index 000000000..82bdc4e9b --- /dev/null +++ b/content/zh/docs/cluster-administration/cluster-settings/cluster-gateway.md @@ -0,0 +1,84 @@ +--- +title: "集群网关" +keywords: 'KubeSphere, Kubernetes, 集群, 网关, NodePort, LoadBalancer' +description: '学习如何在 KubeSphere 中创建集群级别的网关。' +linkTitle: "集群网关" +weight: 8630 + +--- + +KubeSphere v3.2.x 提供集群级别的网关,使所有项目共用一个全局网关。本文档介绍如何在 KubeSphere 设置集群网关。 + +## 准备工作 + +您需要创建一个拥有 `platform-admin` 角色的用户,例如:`admin`。有关更多信息,请参见[创建企业空间、项目、用户和平台角色](../../../quick-start/create-workspace-and-project/). + +## 创建集群网关 + +1. 以 `admin` 身份登录 web 控制台,点击左上角的**平台管理**并选择**集群管理**。 + +2. 点击导航面板中**集群设置**下的**网关设置**,选择**集群网关**选项卡,并点击**开启网关**。 + +3. 
在显示的对话框中,从以下的两个选项中选择网关的访问模式: + + - **NodePort**:通过网关使用对应节点端口来访问服务。NodePort 访问模式提供以下配置: + - **链路追踪**:打开**链路追踪**开关以启用 KubeSphere 的链路追踪功能。功能开启后,如应用路由不可访问,请检查是否为应用路由添加注解(`nginx.ingress.kubernetes.io/service-upstream: true`)。如注解没有添加,则添加注解至您的应用路由中。 + - **配置选项**:在集群网关中加入键值对。 + - **LoadBalancer**:通过网关使用单个 IP 地址访问服务。LoadBalancer 访问模式提供以下配置: + - **链路追踪**:打开**链路追踪**开关以启用 KubeSphere 的链路追踪功能。功能开启后,如应用路由不可访问,请检查是否为应用路由添加注解(`nginx.ingress.kubernetes.io/service-upstream: true`)。如注解没有添加,则添加注解至您的应用路由中。 + - **负载均衡器提供商**:从下拉列表中选择负载均衡器提供商。 + - **注解**:添加注解至集群网关。 + - **配置选项**: 添加键值对至集群网关。 + + {{< notice info >}} + + - 为了使用链路追踪功能,请在创建自制应用时打开**应用治理**。 + - 有关如何使用配置选项的更多信息,请参见 [Configuration options](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options)。 + + {{}} + +4. 点击**确定**创建集群网关。 + +5. 在这个页面中会展示创建的集群网关和该网关的基本信息。 + + {{< notice note >}} + + 同时还创建了名为 kubesphere-router-kubesphere-system 的网关,作为集群中所有项目的全局网关。 + + {{}} + +6. 点击**管理**,从下拉菜单中选择一项操作: + + - **查看详情**:转至集群网关详情页面。 + - **编辑**:编辑集群网关配置。 + - **关闭**:关闭集群网关。 + +7. 创建集群网关后,有关如何创建应用路由的更多信息,请参见[应用路由](../../../project-user-guide/application-workloads/routes/#create-a-route)。 + +## 集群网关详情页面 + +1. 在**集群网关**选项卡下,点击集群网关右侧的**管理**,选择**查看详情**以打开其详情页面。 +2. 在详情页面,点击**编辑**以配置集群网关,或点击**更多操作**以选择操作。 +3. 点击**监控**选项卡,查看集群网关的监控指标。 +4. 点击**配置选项**选项卡以查看集群网关的配置选项。 +5. 点击**网关日志**选项卡以查看集群网关日志。 +6. 点击**资源状态**选项卡,以查看集群网关的负载状态。点击 按钮,以增加或减少副本数量。 +7. 
点击**元数据**选项卡,以查看集群网关的注解。 + +## 查看项目网关 + +在**网关设置**页面,点击**项目网关**选项卡,以查看项目网关。 + +点击项目网关右侧的 ,从下拉菜单中选择操作: + +- **编辑**:编辑项目网关的配置。 +- **关闭**:关闭项目网关。 + +{{< notice note >}} + +如果在创建集群网关之前存在项目网关,则项目网关地址可能会在集群网关地址和项目网关地址之间切换。建议您只使用集群网关或项目网关。 + +{{}} + +关于如何创建项目网关的更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 + diff --git a/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md b/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md index f1e1af345..0b5c6b61c 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md +++ b/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md @@ -12,31 +12,25 @@ weight: 8610 ## 准备工作 * 您需要启用[多集群功能](../../../multicluster-management/)。 -* 您需要有一个企业空间和一个拥有创建企业空间权限的帐户,例如 `ws-manager`。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +* 您需要有一个企业空间和一个拥有创建企业空间权限的帐户,例如 `ws-manager`。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 设置集群可见性 ### 在创建企业空间时选择可用集群 -1. 使用拥有创建企业空间权限的帐户登录 KubeSphere,例如 `ws-manager`。 +1. 使用拥有创建企业空间权限的用户登录 KubeSphere,例如 `ws-manager`。 2. 点击左上角的**平台管理**,选择**访问控制**。在左侧导航栏选择**企业空间**,然后点击**创建**。 - ![创建企业空间](/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.PNG) - 3. 输入企业空间的基本信息,点击**下一步**。 -4. 在**集群选择**页面,您可以看到可用的集群列表,选择要分配给企业空间的集群并点击**创建**。 - - ![选择集群](/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-a-cluster.PNG) +4. 在**集群设置**页面,您可以看到可用的集群列表,选择要分配给企业空间的集群并点击**创建**。 5. 创建企业空间后,拥有必要权限的企业空间成员可以创建资源,在关联集群上运行。 - ![创建项目](/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png) - {{< notice warning >}} -尽量不要在 Host 集群上创建资源,避免负载过高导致多集群稳定性下降。 +尽量不要在主集群上创建资源,避免负载过高导致多集群稳定性下降。 {{}} @@ -52,11 +46,7 @@ weight: 8610 4. 
您可以看到已授权企业空间的列表,这意味着所有这些企业空间中的资源都能使用当前集群。 - ![设置集群可见性1](/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.PNG) - -5. 点击**编辑可见范围**设置集群授权。您可以选择让新的企业空间使用该集群,或者将该集群从企业空间解绑。 - - ![设置集群可见性2](/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.PNG) +5. 点击**编辑可见性**设置集群可见性。您可以选择让新的企业空间使用该集群,或者将该集群从企业空间解绑。 ### 将集群设置为公开集群 diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md index 08856f465..bce4fe493 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md +++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md @@ -1,5 +1,5 @@ --- -linkTitle: "日志收集" +linkTitle: "日志接收器" weight: 8620 _build: diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md index 6e4544d72..6e465c8a9 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md +++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md @@ -1,7 +1,7 @@ --- title: "添加 Elasticsearch 作为接收器" keywords: 'Kubernetes, 日志, Elasticsearch, Pod, 容器, Fluentbit, 输出' -description: '了解如何添加 Elasticsearch 来接收日志、事件或审计日志。' +description: '了解如何添加 Elasticsearch 来接收容器日志、资源事件或审计日志。' linkTitle: "添加 Elasticsearch 作为接收器" weight: 8622 --- @@ -9,7 +9,7 @@ weight: 8622 ## 准备工作 -- 您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 - 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。 ## 添加 Elasticsearch 作为接收器 @@ -22,15 +22,13 
@@ weight: 8622 {{}} -2. 在**集群管理**页面,选择**集群设置**下的**日志收集**。 +2. 在**集群管理**页面,选择**集群设置**下的**日志接收器**。 3. 点击**添加日志接收器**并选择 **Elasticsearch**。 -4. 提供 Elasticsearch 服务地址和端口信息,如下所示: +4. 提供 Elasticsearch 服务地址和端口信息。 - ![add-es](/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-es-as-receiver/add-es.png) - -5. Elasticsearch 会显示在**日志收集**页面的接收器列表中,状态为**收集中**。 +5. Elasticsearch 会显示在**日志接收器**页面的接收器列表中,状态为**收集中**。 6. 若要验证 Elasticsearch 是否从 Fluent Bit 接收日志,从右下角的**工具箱**中点击**日志查询**,在控制台中搜索日志。有关更多信息,请参阅[日志查询](../../../../toolbox/log-query/)。 diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md index 5ecbf9fdf..dc90d4e52 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md +++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md @@ -1,19 +1,19 @@ --- title: "添加 Fluentd 作为接收器" -keywords: 'Kubernetes, 日志, Fluentd, Pod, 容器, Fluentbit, 输出' -description: '了解如何添加 Fluentd 来接收日志、事件或审计日志。' +keywords: 'Kubernetes, 日志, Fluentd, 容器组, 容器, Fluentbit, 输出' +description: '了解如何添加 Fluentd 来接收容器日志、资源事件或审计日志。' linkTitle: "添加 Fluentd 作为接收器" weight: 8624 --- 您可以在 KubeSphere 中使用 Elasticsearch、Kafka 和 Fluentd 日志接收器。本教程演示: -- 创建 Fluentd 部署以及对应的服务和 ConfigMap。 -- 添加 Fluentd 作为日志接收器以接收来自 Fluent Bit 的日志,并输出为 stdout(标准输出)。 +- 创建 Fluentd 部署以及对应的服务(Service)和配置字典(ConfigMap)。 +- 添加 Fluentd 作为日志接收器以接收来自 Fluent Bit 的日志,并输出为标准输出。 - 验证 Fluentd 能否成功接收日志。 ## 准备工作 -- 您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 - 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。 @@ -25,7 +25,7 @@ weight: 8624 {{< notice note >}} -- 以下命令将在默认命名空间 `default` 中创建 
Fluentd 部署、服务和 ConfigMap,并为该 Fluentd ConfigMap 添加 `filter` 以排除 `default` 命名空间中的日志,避免 Fluent Bit 和 Fluentd 重复日志收集。 +- 以下命令将在默认命名空间 `default` 中创建 Fluentd 部署、服务和配置字典,并为该 Fluentd 配置字典添加 `filter` 以排除 `default` 命名空间中的日志,避免 Fluent Bit 和 Fluentd 重复日志收集。 - 如果您想要将 Fluentd 部署至其他命名空间,请修改以下命令中的命名空间名称。 {{}} @@ -130,29 +130,25 @@ EOF {{}} -2. 在**集群管理**页面,选择**集群设置**下的**日志收集**。 +2. 在**集群管理**页面,选择**集群设置**下的**日志接收器**。 3. 点击**添加日志接收器**并选择 **Fluentd**。 -4. 输入 **Fluentd** 服务地址和端口信息,如下所示: +4. 输入 **Fluentd** 服务地址和端口信息。 - ![add-fluentd](/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/add-fluentd.png) - -5. Fluentd 会显示在**日志收集**页面的接收器列表中,状态为**收集中**。 +5. Fluentd 会显示在**日志接收器**页面的接收器列表中,状态为**收集中**。 ## 步骤 3:验证 Fluentd 能否从 Fluent Bit 接收日志 1. 在**集群管理**页面点击**应用负载**。 -2. 点击**工作负载**,并从**部署**选项卡下的下拉菜单中选择 `default` 项目。 +2. 点击**工作负载**,并在**部署**选项卡中选择 `default` 项目。 -3. 点击 **fluentd** 项目并选择 **fluentd-xxxxxxxxx-xxxxx** Pod。 +3. 点击 **fluentd** 项目并选择 **fluentd-xxxxxxxxx-xxxxx** 容器组。 4. 点击 **fluentd** 容器。 5. 在 **fluentd** 容器页面,选择**容器日志**选项卡。 -6. 您可以看到日志持续滚动输出。 - - ![container-logs](/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/container-logs.png) \ No newline at end of file +6. 
您可以看到日志持续滚动输出。 \ No newline at end of file diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md index 4eedab994..4e452637c 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md +++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md @@ -1,24 +1,24 @@ --- title: "添加 Kafka 作为接收器" keywords: 'Kubernetes, 日志, Kafka, Pod, 容器, Fluentbit, 输出' -description: '了解如何添加 Kafka 来接收日志、事件或审计日志。' +description: '了解如何添加 Kafka 来接收容器日志、资源事件或审计日志。' linkTitle: "添加 Kafka 作为接收器" weight: 8623 --- 您可以在 KubeSphere 中使用 Elasticsearch、Kafka 和 Fluentd 日志接收器。本教程演示: -- 部署 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator),然后通过创建 `Kafka` 和 `KafkaTopic` CRD 以创建 Kafka 集群和 Kafka Topic。 +- 部署 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator),然后通过创建 `Kafka` 和 `KafkaTopic` CRD 以创建 Kafka 集群和 Kafka 主题。 - 添加 Kafka 作为日志接收器以从 Fluent Bit 接收日志。 - 使用 [Kafkacat](https://github.com/edenhill/kafkacat) 验证 Kafka 集群是否能接收日志。 ## 准备工作 -- 您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 - 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。本教程启用 `logging` 作为示例。 -## 步骤 1:创建 Kafka 集群和 Kafka Topic +## 步骤 1:创建 Kafka 集群和 Kafka 主题 -您可以使用 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) 创建 Kafka 集群和 Kafka Topic。如果您已经有了一个 Kafka 集群,您可以直接从下一步开始。 +您可以使用 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) 创建 Kafka 集群和 Kafka 主题。如果您已经有了一个 Kafka 集群,您可以直接从下一步开始。 1. 在 `default` 命名空间中安装 [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator): @@ -31,7 +31,7 @@ weight: 8623 ``` -2. 
运行以下命令在 `default` 命名空间中创建 Kafka 集群和 Kafka Topic,该命令所创建的 Kafka 和 Zookeeper 集群的存储类型为 `ephemeral`,使用 `emptyDir` 进行演示。若要在生产环境下配置储存类型,请参见 [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml)。 +2. 运行以下命令在 `default` 命名空间中创建 Kafka 集群和 Kafka 主题,该命令所创建的 Kafka 和 Zookeeper 集群的存储类型为 `ephemeral`,使用 `emptyDir` 进行演示。若要在生产环境下配置储存类型,请参见 [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml)。 ```yaml cat <}} -2. 在**集群管理**页面,选择**集群设置**下的**日志收集**。 +2. 在**集群管理**页面,选择**集群设置**下的**日志接收器**。 -3. 点击**添加日志接收器**并选择 **Kafka**。输入 Kafka 代理地址和端口信息,然后点击**确定**继续。 +3. 点击**添加日志接收器**并选择 **Kafka**。输入 Kafka 服务地址和端口信息,然后点击**确定**继续。 - | 地址 | 端口 | + | 服务地址 | 端口号 | | ------------------------------------------------------- | ---- | | my-cluster-kafka-0.my-cluster-kafka-brokers.default.svc | 9092 | | my-cluster-kafka-1.my-cluster-kafka-brokers.default.svc | 9092 | | my-cluster-kafka-2.my-cluster-kafka-brokers.default.svc | 9092 | - ![add-kafka](/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-kafka-as-receiver/add-kafka.png) - 4. 
运行以下命令验证 Kafka 集群是否能从 Fluent Bit 接收日志: ```bash diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/introduction.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/introduction.md index e068ff3c7..9461e1121 100644 --- a/content/zh/docs/cluster-administration/cluster-settings/log-collections/introduction.md +++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/introduction.md @@ -1,18 +1,18 @@ --- title: "介绍" keywords: 'Kubernetes, 日志, Elasticsearch, Kafka, Fluentd, Pod, 容器, Fluentbit, 输出' -description: '了解集群日志收集的基础知识,包括工具和一般步骤。' +description: '了解集群日志接收器的基础知识,包括工具和一般步骤。' linkTitle: "介绍" weight: 8621 --- -KubeSphere 提供灵活的日志收集配置方式。基于 [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator/),用户可以轻松添加、修改、删除、启用或禁用 Elasticsearch、Kafka 和 Fluentd 接收器。接收器添加后,日志会发送至该接收器。 +KubeSphere 提供灵活的日志接收器配置方式。基于 [FluentBit Operator](https://github.com/kubesphere/fluentbit-operator/),用户可以轻松添加、修改、删除、启用或禁用 Elasticsearch、Kafka 和 Fluentd 接收器。接收器添加后,日志会发送至该接收器。 此教程简述在 KubeSphere 中添加日志接收器的一般性步骤。 ## 准备工作 -- 您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +- 您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 - 添加日志接收器前,您需要启用组件 `logging`、`events` 或 `auditing`。有关更多信息,请参见[启用可插拔组件](../../../../pluggable-components/)。 @@ -30,9 +30,9 @@ KubeSphere 提供灵活的日志收集配置方式。基于 [FluentBit Operator] {{}} -3. 选择**集群设置**下的**日志收集**。 +3. 选择**集群设置**下的**日志接收器**。 -4. 在**日志**选项卡下点击**添加日志接收器**。 +4. 
在日志接收器列表页,点击**添加日志接收器**。 {{< notice note >}} @@ -43,9 +43,9 @@ KubeSphere 提供灵活的日志收集配置方式。基于 [FluentBit Operator] ### 添加 Elasticsearch 作为日志接收器 -如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) 中启用了 `logging`、`events` 或 `auditing`,则会添加默认的 Elasticsearch 接收器,服务地址会设为 Elasticsearch 集群。 +如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) 中启用了 `logging`、`events` 或 `auditing`,则会添加默认的 Elasticsearch 接收器,服务地址会设为 Elasticsearch 集群。 -当 `logging`、`events` 或 `auditing` 启用时,如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) 中未指定 `externalElasticsearchUrl` 和 `externalElasticsearchPort`,则内置 Elasticsearch 集群会部署至 Kubernetes 集群。内置 Elasticsearch 集群仅用于测试和开发。生产环境下,建议您集成外置 Elasticsearch 集群。 +当 `logging`、`events` 或 `auditing` 启用时,如果 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) 中未指定 `externalElasticsearchHost` 和 `externalElasticsearchPort`,则内置 Elasticsearch 集群会部署至 Kubernetes 集群。内置 Elasticsearch 集群仅用于测试和开发。生产环境下,建议您集成外置 Elasticsearch 集群。 日志查询需要依靠所配置的内置或外置 Elasticsearch 集群。 @@ -59,33 +59,29 @@ Kafka 往往用于接收日志,并作为 Spark 等处理系统的代理 (Broke 如果您需要将日志输出到除 Elasticsearch 或 Kafka 以外的其他地方,您可以添加 Fluentd 作为日志接收器。Fluentd 支持多种输出插件,可以将日志发送至多个目标,例如 S3、MongoDB、Cassandra、MySQL、syslog 和 Splunk 等。[添加 Fluentd 作为接收器](../add-fluentd-as-receiver/)演示如何添加 Fluentd 接收 Kubernetes 日志。 -## 为事件或审计日志添加日志接收器 +## 为资源事件或审计日志添加日志接收器 -自 KubeSphere v3.0.0 起,Kubernetes 事件和 Kubernetes 以及 KubeSphere 审计日志可以通过和容器日志相同的方式进行存档。如果在 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md) 中启用了 `events` 或 `auditing`,**日志收集**页面会对应显示**事件**或**审计**选项卡。您可以前往对应选项卡为 Kubernetes 事件或 Kubernetes 以及 KubeSphere 审计日志配置日志接收器。 +自 KubeSphere v3.0.0 起,资源事件和审计日志可以通过和容器日志相同的方式进行存档。如果在 [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md) 中启用了 `events` 或 
`auditing`,**日志接收器**页面会对应显示**资源事件**或**审计日志**选项卡。您可以前往对应选项卡为资源事件或审计日志配置日志接收器。 -容器日志、Kubernetes 事件和 Kubernetes 以及 KubeSphere 审计日志应存储在不同的 Elasticsearch 索引中以便在 KubeSphere 中进行搜索,索引前缀如下: - -- 容器日志:`ks-logstash-log` -- Kubernetes 事件:`ks-logstash-events` -- Kubernetes 和 KubeSphere 审计日志:`ks-logstash-auditing` +容器日志、资源事件和审计日志应存储在不同的 Elasticsearch 索引中以便在 KubeSphere 中进行搜索。系统以`<索引前缀>-<年-月-日>`格式自动生成索引。 ## 启用或停用日志接收器 无需新增或删除日志接收器,您可以随时启用或停用日志接收器,具体步骤如下: -1. 在**日志收集**页面,点击一个日志接收器并进入其详情页面。 +1. 在**日志接收器**页面,点击一个日志接收器并进入其详情页面。 2. 点击**更多操作**并选择**更改状态**。 -3. 选择**激活**或**关闭**以启用或停用该日志接收器。 +3. 选择**收集中**或**关闭**以启用或停用该日志接收器。 4. 停用后,日志接收器的状态会变为**关闭**,激活时状态为**收集中**。 -## 修改或删除日志接收器 +## 编辑或删除日志接收器 -您可以修改或删除日志接收器: +您可以编辑或删除日志接收器: -1. 在**日志收集**页面,点击一个日志接收器并进入其详情页面。 -2. 点击**编辑**或从下拉菜单中选择**编辑配置文件**以编辑日志接收器。 +1. 在**日志接收器**页面,点击一个日志接收器并进入其详情页面。 +2. 点击**编辑**或从下拉菜单中选择**编辑 YAML** 以编辑日志接收器。 -3. 点击**删除日志接收器**进行删除。 +3. 点击**删除**以删除日志接收器。 diff --git a/content/zh/docs/cluster-administration/cluster-status-monitoring.md b/content/zh/docs/cluster-administration/cluster-status-monitoring.md index b92b7403a..cbc554174 100644 --- a/content/zh/docs/cluster-administration/cluster-status-monitoring.md +++ b/content/zh/docs/cluster-administration/cluster-status-monitoring.md @@ -1,40 +1,32 @@ --- title: "集群状态监控" keywords: "Kubernetes, KubeSphere, 状态, 监控" -description: "根据不同的指标(包括物理资源、etcd 和 APIServer)监控集群如何运行。" +description: "根据不同的指标(包括物理资源、etcd 和 API server)监控集群如何运行。" linkTitle: "集群状态监控" weight: 8200 --- -KubeSphere 支持对集群 CPU、内存、网络和磁盘等资源的相关指标进行监控。在**集群状态监控**页面,您可以查看历史监控数据并根据不同资源的使用率对节点进行排序。 +KubeSphere 支持对集群 CPU、内存、网络和磁盘等资源的相关指标进行监控。在**集群状态**页面,您可以查看历史监控数据并根据不同资源的使用率对节点进行排序。 ## 准备工作 -您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 ## 集群状态监控 1. 点击左上角的**平台管理**,然后选择**集群管理**。 -2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了 Member 集群,您可以选择一个特定集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 +2. 
如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个特定集群以查看其应用程序资源。如果尚未启用该功能,请直接进行下一步。 - ![Clusters Management](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/clusters-management.png) - -3. 在左侧导航栏选择**监控告警**下的**集群状态**以查看集群状态概览,包括**集群节点状态**、**组件状态**、**集群资源使用情况**、**ETCD 监控**和**服务组件监控**。 - - ![Cluster Status Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png) +3. 在左侧导航栏选择**监控告警**下的**集群状态**以查看集群状态概览,包括**集群节点状态**、**组件状态**、**集群资源用量**、**etcd 监控**和**服务组件监控**。 ### 集群节点状态 1. **集群节点状态**显示在线节点和所有节点的数量。您可以点击**节点在线状态**跳转到**集群节点**页面以查看所有节点的实时资源使用情况。 - ![Cluster Nodes](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-nodes.png) -2. 在**集群节点**页面,点击节点名称可打开**运行状态**页面查看 CPU、内存、容器组 (Pod)、本地存储等资源的使用详情,以及节点健康状态。 +2. 在**集群节点**页面,点击节点名称可打开**运行状态**页面查看**资源用量**,**已分配资源**和**健康状态**。 - ![Running Status](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/running-status.png) -3. 点击**监控**选项卡,可以查看节点在特定时间范围内的各种运行指标,包括 **CPU 使用情况**、**CPU 平均负载**、**内存使用情况**、**磁盘利用率**、**inode 使用率**、**IOPS**、**磁盘吞吐**和**网络带宽**。 - - ![Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/monitoring.png) +3. 点击**监控**选项卡,可以查看节点在特定时间范围内的各种运行指标,包括 **CPU 用量**、**CPU 平均负载**、**内存用量**、**磁盘用量**、**Inode 用量**、**IOPS**、**磁盘吞吐**和**网络带宽**。 {{< notice tip >}} @@ -46,40 +38,29 @@ KubeSphere 支持对集群 CPU、内存、网络和磁盘等资源的相关指 KubeSphere 监控集群中各种服务组件的健康状态。当关键组件发生故障时,系统可能会变得不可用。KubeSphere 的监控机制确保平台可以在组件出现故障时将所有问题通知租户,以便快速定位问题并采取相应的措施。 -1. 在**集群状态监控**页面,点击**组件状态**区域的组件以查看其状态。 +1. 在**集群状态**页面,点击**组件状态**区域的组件可查看其状态。 - ![component-monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/component-monitoring.png) -2. **服务组件**页面列出了所有的组件。标记为绿色的组件是正常运行的组件,标记为橙色的组件存在问题,需要特别关注。 +2. 
**系统组件**页面列出了所有的组件。标记为绿色的组件是正常运行的组件,标记为橙色的组件存在问题,需要特别关注。 - ![Service Components Status](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/service-components-status.png) {{< notice tip >}} 标记为橙色的组件可能会由于各种原因在一段时间后变为绿色,例如重试拉取镜像或重新创建实例。您可以点击一个组件查看其服务详情。 {{}} -### 集群资源使用情况 +### 集群资源用量 -**集群资源使用情况**显示集群中所有节点的 **CPU 使用情况**、**内存使用情况**、**磁盘利用率**和**容器组数量变化**。您可以点击左侧的饼图切换指标。右侧的曲线图显示一段时间内指示的变化趋势。 - -![Cluster Resources Usage](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png) +**集群资源用量**显示集群中所有节点的 **CPU 用量**、**内存用量**、**磁盘用量**和**容器组数量**。您可以点击左侧的饼图切换指标。右侧的曲线图显示一段时间内指示的变化趋势。 ## 物理资源监控 -您可以利用**物理资源监控**页面提供的数据更好地掌控物理资源状态,并建立正常资源和集群性能的标准。KubeSphere 允许用户查看最近 7 天的集群监控数据,包括 **CPU 使用情况**、**内存使用情况**、**CPU 平均负载(1 分钟/5 分钟/15 分钟)**、**inode 使用率**、**磁盘吞吐(读写)**、**IOPS(读写)**、**网络带宽**和**容器组运行状态**。您可以在 KubeSphere 中自定义时间范围和时间间隔以查看物理资源的历史监控数据。以下简要介绍每个监控指标。 +您可以利用**物理资源监控**页面提供的数据更好地掌控物理资源状态,并建立正常资源和集群性能的标准。KubeSphere 允许用户查看最近 7 天的集群监控数据,包括 **CPU 用量**、**内存用量**、**CPU 平均负载(1 分钟/5 分钟/15 分钟)**、**磁盘用量**、**Inode 用量**、**磁盘吞吐(读写)**、**IOPS(读写)**、**网络带宽**和**容器组状态**。您可以在 KubeSphere 中自定义时间范围和时间间隔以查看物理资源的历史监控数据。以下简要介绍每个监控指标。 -![Physical Resources Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png) +### CPU 用量 -### CPU 使用情况 +**CPU 用量**显示一段时间内 CPU 资源的用量。如果某一时间段的 CPU 用量急剧上升,您首先需要定位占用 CPU 资源最多的进程。例如,Java 应用程序代码中的内存泄漏或无限循环可能会导致 CPU 用量急剧上升。 -**CPU 使用情况**显示一段时间内 CPU 资源的使用率。如果某一时间段的 CPU 使用率急剧上升,您首先需要定位占用 CPU 资源最多的进程。例如,Java 应用程序代码中的内存泄漏或无限循环可能会导致 CPU 使用率急剧上升。 - -![CPU Utilization](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-utilization.png) - -### 内存使用情况 - -内存是机器上的重要组件之一,是与 CPU 通信的桥梁。因此,内存对机器的性能有很大影响。当程序运行时,数据加载、线程并发和 I/O 缓冲都依赖于内存。可用内存的大小决定了程序能否正常运行以及如何运行。**内存使用情况**反映了集群内存资源的整体使用情况,显示为特定时刻内存占用的百分比。 - -![Memory Utilization](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/memory-utilization.png) +### 内存用量 +内存是机器上的重要组件之一,是与 CPU 
通信的桥梁。因此,内存对机器的性能有很大影响。当程序运行时,数据加载、线程并发和 I/O 缓冲都依赖于内存。可用内存的大小决定了程序能否正常运行以及如何运行。**内存使用情况**反映了集群内存资源的整体用量,显示为特定时刻内存占用的百分比。 ### CPU 平均负载 CPU 平均负载是单位时间内系统中处于可运行状态和非中断状态的平均进程数(亦即活动进程的平均数量)。CPU 平均负载和 CPU 利用率之间没有直接关系。理想情况下,平均负载应该等于 CPU 的数量。因此,在查看平均负载时,需要考虑 CPU 的数量。只有当平均负载大于 CPU 数量时,系统才会超载。 @@ -90,55 +71,41 @@ KubeSphere 为用户提供了 1 分钟、5 分钟和 15 分钟三种不同的平 - 如果某一时间范围或某一特定时间点 1 分钟的数值远大于 15 分钟的数值,则表明最近 1 分钟的负载在增加,需要继续观察。一旦 1 分钟的数值超过 CPU 数量,系统可能出现超载,您需要进一步分析问题的根源。 - 如果某一时间范围或某一特定时间点 1 分钟的数值远小于 15 分钟的数值,则表明系统在最近 1 分钟内负载在降低,在前 15 分钟内出现了较高的负载。 -![CPU Load Average](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-load-average.png) +### 磁盘用量 -### 磁盘使用量 +KubeSphere 的工作负载(例如`有状态副本集`和`守护进程集`)都依赖于持久卷。某些组件和服务也需要持久卷。这种后端存储依赖于磁盘,例如块存储或网络共享存储。因此,实时的磁盘用量监控环境对确保数据的高可靠性尤为重要。 -KubeSphere 的工作负载(例如 `StatefulSets` 和 `DaemonSets`)都依赖于持久卷。某些组件和服务也需要持久卷。这种后端存储依赖于磁盘,例如块存储或网络共享存储。因此,实时的磁盘用量监控环境对确保数据的高可靠性尤为重要。 +在 Linux 系统的日常管理中,平台管理员可能会遇到磁盘空间不足导致数据丢失甚至系统崩溃的情况。作为集群管理的重要组成部分,平台管理员需要密切关注系统的磁盘使用情况,并确保文件系统不会被用尽或滥用。通过监控磁盘使用的历史数据,您可以评估特定时间范围内磁盘的使用情况。在磁盘用量较高的情况下,您可以通过清理不必要的镜像或容器来释放磁盘空间。 -在 Linux 系统的日常管理中,平台管理员可能会遇到磁盘空间不足导致数据丢失甚至系统崩溃的情况。作为集群管理的重要组成部分,平台管理员需要密切关注系统的磁盘使用情况,并确保文件系统不会被用尽或滥用。通过监控磁盘使用的历史数据,您可以评估特定时间范围内磁盘的使用情况。在磁盘使用率较高的情况下,您可以通过清理不必要的镜像或容器来释放磁盘空间。 - -![Disk Usage](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-usage.png) - -### inode 使用率 +### Inode 用量 每个文件都有一个 inode,用于存储文件的创建者和创建日期等元信息。inode 也会占用磁盘空间,众多的小缓存文件很容易导致 inode 资源耗尽。此外,在 inode 已用完但磁盘未满的情况下,也无法在磁盘上创建新文件。 在 KubeSphere 中,对 inode 使用率的监控可以帮助您清楚地了解集群 inode 的使用率,从而提前检测到此类情况。该机制提示用户及时清理临时文件,防止集群因 inode 耗尽而无法工作。 -![inode Utilization](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/inode-utilization.png) - ### 磁盘吞吐 磁盘吞吐和 IOPS 监控是磁盘监控不可或缺的一部分,可帮助集群管理员调整数据布局和其他管理活动以优化集群整体性能。磁盘吞吐量是指磁盘传输数据流(包括读写数据)的速度,单位为 MB/s。当传输大块非连续数据时,该指标具有重要的参考意义。 -![Disk Throughput](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-throughput.png) - ### IOPS -IOPS 表示每秒读取和写入操作数。具体来说,磁盘的 IOPS 
是每秒连续读取和写入的总和。当传输小块非连续数据时,该指示器具有重要的参考意义。 - -![IOPS](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/iops.png) +IOPS 表示每秒读写操作数。具体来说,磁盘的 IOPS 是每秒连续读写的总和。当传输小块非连续数据时,该指标具有重要的参考意义。 ### 网络带宽 网络带宽是网卡每秒接收或发送数据的能力,单位为 Mbps。 -![Network Bandwidth](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/netework-bandwidth.png) +### 容器组状态 -### 容器组运行状态 +**容器组状态**显示不同状态的容器组的总数,包括**运行中**、**已完成**和**异常**状态。标记为**已完成**的容器组通常为任务(Job)或定时任务(CronJob)。标记为**异常**的容器组需要特别注意。 -**容器组运行状态**显示不同状态的 Pod 的总数,包括**运行中**、**已完成**和**异常**状态。标记为**已完成**的 Pod 通常为 Job 或 CronJob。标记为**异常**的 Pod 需要特别注意。 +## etcd 监控 -![Pod Status](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/pod-status.png) - -## ETCD 监控 - -**ETCD 监控**可以帮助您更好地利用 etcd,特别用于是定位性能问题。etcd 服务提供了原生的指标接口。KubeSphere 监控系统提供了高度图形化和响应性强的仪表板,用于显示原生数据。 +**etcd 监控**可以帮助您更好地利用 etcd,特别是用于定位性能问题。etcd 服务提供了原生的指标接口。KubeSphere 监控系统提供了高度图形化和响应性强的仪表板,用于显示原生数据。 | 指标 | 描述 | | --- | --- | -| ETCD 节点 | - **是否有 Leader** 表示成员是否有 Leader。如果成员没有 Leader,则成员完全不可用。如果集群中的所有成员都没有任何 Leader,则整个集群完全不可用。
    - **Leader 变更次数**表示集群成员观察到的 Leader 变更总次数。频繁变更 Leader 将显著影响 etcd 性能,同时这还表明 Leader 可能由于网络连接问题或 etcd 集群负载过高而不稳定。 | +| 服务状态 | - **是否有 Leader** 表示成员是否有 Leader。如果成员没有 Leader,则成员完全不可用。如果集群中的所有成员都没有任何 Leader,则整个集群完全不可用。
    - **1 小时内 Leader 变更次数**表示集群成员观察到的 1 小时内 Leader 变更总次数。频繁变更 Leader 将显著影响 etcd 性能,同时这还表明 Leader 可能由于网络连接问题或 etcd 集群负载过高而不稳定。 | | 库大小 | etcd 的底层数据库大小,单位为 MiB。图表中显示的是 etcd 的每个成员数据库的平均大小。 | | 客户端流量 | 包括发送到 gRPC 客户端的总流量和从 gRPC 客户端接收的总流量。有关该指标的更多信息,请参阅[ etcd Network](https://github.com/etcd-io/etcd/blob/v3.2.17/Documentation/metrics.md#network)。 | | gRPC 流式消息 | 服务器端的 gRPC 流消息接收速率和发送速率,反映集群内是否正在进行大规模的数据读写操作。有关该指标的更多信息,请参阅[ go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus#counters)。 | @@ -146,22 +113,18 @@ IOPS 表示每秒读取和写入操作数。具体来说,磁盘的 IOPS 是每 | 库同步时间 | 后端调用提交延迟的分布。当 etcd 将其最新的增量快照提交到磁盘时,会调用 `backend_commit`。需要注意的是,磁盘操作延迟较大(WAL 日志同步时间或库同步时间较长)通常表示磁盘存在问题,这可能会导致请求延迟过高或集群不稳定。有关该指标的详细信息,请参阅[ etcd Disk](https://etcd.io/docs/v3.3.12/metrics/#disk)。 | | Raft 提议 | - **提议提交速率**记录提交的协商一致提议的速率。如果集群运行状况良好,则该指标应随着时间的推移而增加。etcd 集群的几个健康成员可以同时具有不同的一般提议。单个成员与其 Leader 之间的持续较大滞后表示该成员缓慢或不健康。
    - **提议应用速率**记录协商一致提议的总应用率。etcd 服务器异步地应用每个提交的提议。**提议提交速率**和**提议应用速率**的差异应该很小(即使在高负载下也只有几千)。如果它们之间的差异持续增大,则表明 etcd 服务器过载。当使用大范围查询或大量 txn 操作等大规模查询时,可能会出现这种情况。
    - **提议失败速率**记录提议失败的总速率。这通常与两个问题有关:与 Leader 选举相关的临时失败或由于集群成员数目达不到规定数目而导致的长时间停机。
    - **排队提议数**记录当前待处理提议的数量。待处理提议的增加表明客户端负载较高或成员无法提交提议。
    目前,仪表板上显示的数据是 etcd 成员的平均数值。有关这些指标的详细信息,请参阅[ etcd Server](https://etcd.io/docs/v3.3.12/metrics/#server)。 | -![ETCD Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/etcd-monitoring.png) +## API Server 监控 -## APIServer 监控 - -[APIServer](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) 是 Kubernetes 集群中所有组件交互的中枢。下表列出了 APIServer 的主要监控指标。 +[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) 是 Kubernetes 集群中所有组件交互的中枢。下表列出了 API Server 的主要监控指标。 | 指标 | 描述 | | --- | --- | | 请求延迟 | 资源请求响应延迟,单位为毫秒。该指标按照 HTTP 请求方法进行分类。 | | 每秒请求次数 | kube-apiserver 每秒接受的请求数。 | -![APIServer Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png) - ## 调度器监控 -[调度器](https://kubernetes.io/zh/docs/reference/command-line-tools-reference/kube-scheduler/)监控新建 Pod 的 Kubernetes API,并决定这些新 Pod 运行在哪些节点上。调度器根据收集资源的可用性和 Pod 的资源需求等数据进行决策。监控调度延迟的数据可确保您及时了解调度器的任何延迟。 +[调度器](https://kubernetes.io/zh/docs/reference/command-line-tools-reference/kube-scheduler/)监控新建容器组的 Kubernetes API,并决定这些新容器组运行在哪些节点上。调度器根据收集资源的可用性和容器组的资源需求等数据进行决策。监控调度延迟的数据可确保您及时了解调度器的任何延迟。 | 指标 | 描述 | | --- | --- | @@ -169,10 +132,6 @@ IOPS 表示每秒读取和写入操作数。具体来说,磁盘的 IOPS 是每 | 调度频率 | 包括调度成功、错误和失败的频率。 | | 调度延迟 | 端到端调度延迟,即调度算法延迟和绑定延迟之和。 | -![Scheduler Monitoring](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png) - ## 节点用量排行 -您可以按 **CPU 使用率**、**CPU 平均负载**、**内存使用率**、**本地存储用量**、**inode 使用率**和**容器组用量**等指标对节点进行升序和降序排序。您可以利用这一功能快速发现潜在问题和节点资源不足的情况。 - -![Node Usage Ranking](/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/node-usage-ranking.png) \ No newline at end of file +您可以按 **CPU 用量**、**CPU 平均负载**、**内存用量**、**本地存储用量**、**Inode 用量**和**容器组用量**等指标对节点进行升序和降序排序。您可以利用这一功能快速发现潜在问题和节点资源不足的情况。 \ No newline at end of file diff --git a/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md 
b/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md index 7bda91f79..1b5c0c490 100644 --- a/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md +++ b/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md @@ -11,18 +11,16 @@ weight: 8540 ## 准备工作 - 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。 -- 您需要创建一个帐户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。 +- 您需要创建一个用户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。 - 您已经创建节点级别的告警策略并已触发该告警。有关更多信息,请参考[告警策略(节点级别)](../alerting-policy/)。 ## 查看告警消息 1. 使用 `cluster-admin` 帐户登录 KubeSphere 控制台,导航到**监控告警**下的**告警消息**。 -2. 在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示了为告警通知定义的标题和消息。如需查看告警消息的详情,点击告警策略的名称,然后点击所出现页面上的**告警消息**选项卡。 +2. 在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示了为告警消息定义的概括和详情。如需查看告警消息的详细信息,点击告警策略的名称,然后点击告警策略详情页面上的**告警历史**选项卡。 - ![alert-message-page](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alert-message-page.png) - -3. 在**告警消息**选项卡,您可以看到告警级别、告警资源和告警激活时间。 +3. 
在**告警历史**选项卡,您可以看到告警级别、监控目标和告警激活时间。 ## 查看通知 diff --git a/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md b/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md index 993aa982e..b16c1677f 100644 --- a/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md +++ b/content/zh/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md @@ -14,8 +14,8 @@ KubeSphere 还具有内置策略,一旦满足为这些策略定义的条件, ## 准备工作 - 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting)。 -- 如需接收告警通知,您需要预先配置一个[通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 -- 您需要创建一个帐户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。 +- 如需接收告警通知,您需要预先配置[通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 +- 您需要创建一个用户 (`cluster-admin`) 并授予其 `clusters-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-4-create-a-role)。 - 您需要确保集群中存在工作负载。如果尚未就绪,请参见[部署并访问 Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) 创建一个示例应用。 ## 创建告警策略 @@ -28,13 +28,13 @@ KubeSphere 还具有内置策略,一旦满足为这些策略定义的条件, - **名称**:使用简明名称作为其唯一标识符,例如 `node-alert`。 - **别名**:帮助您更好地识别告警策略。 - - **告警持续时间(分钟)**:若在告警持续时间内的任意时间点均满足为告警策略定义的条件,告警将会触发。 + - **阈值时间(分钟)**:告警规则中设置的情形持续时间达到该阈值后,告警策略将变为触发中状态。 - **告警级别**:提供的值包括**一般告警**、**重要告警**和**危险告警**,代表告警的严重程度。 - **描述信息**:对告警策略的简要介绍。 -4. 在**告警规则**选项卡,您可以使用规则模板或创建自定义规则。如需使用规则模板,请填写以下字段,然后点击**下一步**继续。 +4. 在**规则设置**选项卡,您可以使用规则模板或创建自定义规则。如需使用规则模板,请设置以下参数,然后点击**下一步**继续。 - - **监控目标**:选择集群中的一个节点进行监控。 + - **监控目标**:选择至少一个集群节点进行监控。 - **告警规则**:为告警策略定义一个规则。下拉菜单中提供的规则基于 Prometheus 表达式,满足条件时将会触发告警。您可以对 CPU、内存等对象进行监控。 {{< notice note >}} @@ -43,25 +43,23 @@ KubeSphere 还具有内置策略,一旦满足为这些策略定义的条件, {{}} -5. 在**通知设置**选项卡,输入想要包含在通知中的告警标题和消息,点击**创建**。 +5. 
在**消息设置**选项卡,输入告警消息的概括和详情,点击**创建**。 -6. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到告警持续时间后,将变为**触发中**状态。 +6. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到阈值时间后,将变为**触发中**状态。 ## 编辑告警策略 如需在创建后编辑告警策略,在**告警策略**页面点击右侧的 。 -1. 点击下拉菜单中的**编辑**,根据与创建时相同的步骤来编辑告警策略。点击**通知设置**页面的**更新**保存更改。 +1. 点击下拉菜单中的**编辑**,根据与创建时相同的步骤来编辑告警策略。点击**消息设置**页面的**确定**保存更改。 2. 点击下拉菜单中的**删除**以删除告警策略。 ## 查看告警策略 -在**告警策略**页面,点击一个告警策略的名称查看其详情,包括告警规则和告警消息。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 +在**告警策略**页面,点击一个告警策略的名称查看其详情,包括告警规则和告警历史。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 -在**监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**通知设置**显示您在通知中设置的自定义消息。 - -![alerting-policy-details-page](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting-policy-details-page.png) +在**监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**告警消息**显示您在通知中设置的自定义消息。 {{< notice note >}} diff --git a/content/zh/docs/cluster-administration/nodes.md b/content/zh/docs/cluster-administration/nodes.md index 36f6c2ffe..93a26e68a 100644 --- a/content/zh/docs/cluster-administration/nodes.md +++ b/content/zh/docs/cluster-administration/nodes.md @@ -7,62 +7,46 @@ linkTitle: "节点管理" weight: 8100 --- -Kubernetes 将容器放入 Pod 中并在节点上运行,从而运行工作负载。取决于具体的集群环境,节点可以是虚拟机,也可以是物理机。每个节点都包含运行 Pod 所需的服务,这些服务由控制平面管理。有关节点的更多信息,请参阅[ Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/architecture/nodes/)。 +Kubernetes 将容器放入容器组(Pod)中并在节点上运行,从而运行工作负载。取决于具体的集群环境,节点可以是虚拟机,也可以是物理机。每个节点都包含运行容器组所需的服务,这些服务由控制平面管理。有关节点的更多信息,请参阅[ Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/architecture/nodes/)。 本教程介绍集群管理员可查看的集群节点信息和可执行的操作。 ## 准备工作 -您需要一个被授予**集群管理**权限的帐户。例如,您可以直接用 `admin` 帐户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个帐户。 +您需要一个被授予**集群管理**权限的用户。例如,您可以直接用 `admin` 用户登录控制台,或创建一个具有**集群管理**权限的角色然后将此角色授予一个用户。 ## 节点状态 只有集群管理员可以访问集群节点。由于一些节点指标对集群非常重要,集群管理员应监控这些指标并确保节点可用。请按照以下步骤查看节点状态。 1. 点击左上角的**平台管理**,然后选择**集群管理**。 - ![clusters-select](/images/docs/zh-cn/cluster-administration/node-management/clusters-select.png) -2. 
如果您已启用了[多集群功能](../../multicluster-management/)并已导入了 Member 集群,您可以选择一个特定集群以查看其节点信息。如果尚未启用该功能,请直接进行下一步。 - ![cluster-management1](/images/docs/zh-cn/cluster-administration/node-management/cluster-management1.png) +2. 如果您已启用了[多集群功能](../../multicluster-management/)并已导入了成员集群,您可以选择一个特定集群以查看其节点信息。如果尚未启用该功能,请直接进行下一步。 -3. 在左侧导航栏中选择**节点管理**下的**集群节点**,查看节点的状态详情。 - - ![Node-Status](/images/docs/zh-cn/cluster-administration/node-management/Node-Status.png) +3. 在左侧导航栏中选择**节点**下的**集群节点**,查看节点的状态详情。 - **名称**:节点的名称和子网 IP 地址。 - **状态**:节点的当前状态,标识节点是否可用。 - **角色**:节点的角色,标识节点是工作节点还是主节点。 - - **CPU**:节点的实时 CPU 使用率。 - - **内存**:节点的实时内存使用率。 - - **容器组 (Pod)**:节点的实时 Pod 使用率。 - - **已分配 CPU**:该指标根据节点上 Pod 的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源。工作负载实际正在使用 CPU 资源可能低于该数值。该指标对于 Kubernetes 调度器 (kube-scheduler) 非常重要。在大多数情况下,调度器在调度 Pod 时会偏向配得 CPU 资源较少的节点。有关更多信息,请参阅[为容器管理资源](https://kubernetes.io/zh/docs/concepts/configuration/manage-resources-containers/)。 - - **已分配内存**:该指标根据节点上 Pod 的总内存请求计算得出。它表示节点上为工作负载预留的内存资源。工作负载实际正在使用内存资源可能低于该数值。 + - **CPU 用量**:节点的实时 CPU 用量。 + - **内存用量**:节点的实时内存用量。 + - **容器组**:节点的实时容器组用量。 + - **已分配 CPU**:该指标根据节点上容器组的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源。工作负载实际正在使用 CPU 资源可能低于该数值。该指标对于 Kubernetes 调度器 (kube-scheduler) 非常重要。在大多数情况下,调度器在调度容器组时会偏向配得 CPU 资源较少的节点。有关更多信息,请参阅[为容器管理资源](https://kubernetes.io/zh/docs/concepts/configuration/manage-resources-containers/)。 + - **已分配内存**:该指标根据节点上容器组的总内存请求计算得出。它表示节点上为工作负载预留的内存资源。工作负载实际正在使用内存资源可能低于该数值。 {{< notice note >}} - 在大多数情况下,**CPU** 和**已分配 CPU** 的数值不同,**内存**和**已分配内存**的数值也不同,这是正常现象。集群管理员需要同时关注一对指标。最佳实践是根据节点的实际使用情况为每个节点设置资源请求和限制。过度分配资源可能导致集群资源利用率过低,而资源分配不足可能导致集群压力过大从而处于不健康状态。 + 在大多数情况下,**CPU** 和**已分配 CPU** 的数值不同,**内存**和**已分配内存**的数值也不同,这是正常现象。集群管理员需要同时关注一对指标。最佳实践是根据节点的实际使用情况为每个节点设置资源请求和限制。资源分配不足可能导致集群资源利用率过低,而过度分配资源可能导致集群压力过大从而处于不健康状态。 {{}} ## 节点管理 点击列表中的一个节点打开节点详情页面。 -![Node-Detail](/images/docs/zh-cn/cluster-administration/node-management/Node-Detail.png) +- **停止调度/启用调度**:您可以在节点重启或维护期间将节点标记为不可调度。Kubernetes 调度器不会将新容器组调度到标记为不可调度的节点。但这不会影响节点上现有工作负载。在 
KubeSphere 中,您可以点击节点详情页面的**停止调度**将节点标记为不可调度。再次点击此按钮(**启用调度**)可将节点标记为可调度。 -- **停止调度/启用调度**:您可以在节点重启或维护期间将节点标记为不可调度。Kubernetes 调度器不会将新 Pod 调度到标记为不可调度的节点。但这不会影响节点上现有工作负载。在 KubeSphere 中,您可以点击节点详情页面的**停止调度**将节点标记为不可调度。再次点击此按钮(**启用调度**)可将节点标记为可调度。 - -- **标签**:您可以利用节点标签将 Pod 分配给特定节点。首先标记节点(例如,用 `node-role.kubernetes.io/gpu-node` 标记 GPU 节点),然后在[创建工作负载](../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)时在**高级设置**中添加此标签,从而使 Pod 在 GPU 节点上运行。要添加节点标签,请点击**更多操作**,然后选择**编辑标签**。 +- **标签**:您可以利用节点标签将容器组分配给特定节点。首先标记节点(例如,用 `node-role.kubernetes.io/gpu-node` 标记 GPU 节点),然后在[创建工作负载](../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)时在**高级设置**中添加此标签,从而使容器组在 GPU 节点上运行。要添加节点标签,请点击**更多操作**,然后选择**编辑标签**。 - ![node-drop-down-list](/images/docs/zh-cn/cluster-administration/node-management/node-drop-down-list.png) - - ![label-node](/images/docs/zh-cn/cluster-administration/node-management/label-node.png) - - ![assign_pods_to_node1](/images/docs/zh-cn/cluster-administration/node-management/assign_pods_to_node1.png) - -- **污点**:污点允许节点排斥一些 Pod。您可以在节点详情页面添加或删除节点污点。要添加或删除污点,请点击**更多操作**,然后从下拉菜单中选择**污点管理**。 - - ![manage-taint](/images/docs/zh-cn/cluster-administration/node-management/manage-taint.png) - - ![add-taint](/images/docs/zh-cn/cluster-administration/node-management/add-taint.png) +- **污点**:污点允许节点排斥一些容器组。您可以在节点详情页面添加或删除节点污点。要添加或删除污点,请点击**更多操作**,然后从下拉菜单中选择**编辑污点**。 {{< notice note >}} 请谨慎添加污点,因为它们可能会导致意外行为从而导致服务不可用。有关更多信息,请参阅[污点和容忍度](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。 diff --git a/content/zh/docs/cluster-administration/persistent-volume-and-storage-class.md b/content/zh/docs/cluster-administration/persistent-volume-and-storage-class.md index 3fcd51260..22e54e11c 100644 --- a/content/zh/docs/cluster-administration/persistent-volume-and-storage-class.md +++ b/content/zh/docs/cluster-administration/persistent-volume-and-storage-class.md @@ -1,6 +1,6 @@ --- title: "持久卷和存储类型" -keywords: "存储, 存储卷, PV, PVC, 
存储类型, CSI, Ceph RBD, Glusterfs, 青云QingCloud, " +keywords: "存储, 存储卷, PV, PVC, 存储类型, CSI, Ceph RBD, GlusterFS, 青云QingCloud, " description: "了解 PV、PVC 和存储类型的基本概念,并演示如何在 KubeSphere 中管理存储类型和 PVC。" linkTitle: "持久卷和存储类型" weight: 8400 @@ -10,54 +10,49 @@ weight: 8400 ## 介绍 -PersistentVolume (PV) 是集群中的一块存储,可以由管理员事先供应,或者使用存储类型来动态供应。PV 是像存储卷 (Volume) 一样的存储卷插件,但是它的生命周期独立于任何使用该 PV 的 Pod。PV 可以[静态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#static)供应或[动态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#dynamic)供应。 +PersistentVolume (PV) 是集群中的一块存储,可以由管理员事先供应,或者使用存储类型来动态供应。PV 是像存储卷 (Volume) 一样的存储卷插件,但是它的生命周期独立于任何使用该 PV 的容器组。PV 可以[静态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#static)供应或[动态](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#dynamic)供应。 -PersistentVolumeClaim (PVC) 是用户对存储的请求。它与 Pod 类似,Pod 会消耗节点资源,而 PVC 消耗 PV 资源。 +PersistentVolumeClaim (PVC) 是用户对存储的请求。它与容器组类似,容器组会消耗节点资源,而 PVC 消耗 PV 资源。 KubeSphere 支持基于存储类型的[动态卷供应](https://kubernetes.io/zh/docs/concepts/storage/dynamic-provisioning/),以创建 PV。 -[StorageClass](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/) 是管理员描述其提供的存储类型的一种方式。不同的类型可能会映射到不同的服务质量等级或备份策略,或由集群管理员制定的任意策略。每个 StorageClass 都有一个 Provisioner,用于决定使用哪个存储卷插件来供应 PV。该字段必须指定。有关使用哪一个值,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)或与您的存储管理员确认。 +[存储类型](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/)是管理员描述其提供的存储类型的一种方式。不同的类型可能会映射到不同的服务质量等级或备份策略,或由集群管理员制定的任意策略。每个存储类型都有一个 Provisioner,用于决定使用哪个存储卷插件来供应 PV。该字段必须指定。有关使用哪一个值,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)或与您的存储管理员确认。 下表总结了各种 Provisioner(存储系统)常用的存储卷插件。 | 类型 | 描述信息 | | -------------------- | ------------------------------------------------------------ | -| In-tree | 内置并作为 Kubernetes 的一部分运行,例如 [RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd) 和 
[Glusterfs](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。有关此类插件的更多信息,请参见 [Provisioner](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)。 | +| In-tree | 内置并作为 Kubernetes 的一部分运行,例如 [RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd) 和 [GlusterFS](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。有关此类插件的更多信息,请参见 [Provisioner](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)。 | | External-provisioner | 独立于 Kubernetes 部署,但运行上类似于树内 (in-tree) 插件,例如 [NFS 客户端](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client)。有关此类插件的更多信息,请参见 [External Storage](https://github.com/kubernetes-retired/external-storage)。 | | CSI | 容器存储接口,一种将存储资源暴露给 CO(例如 Kubernetes)上的工作负载的标准,例如 [QingCloud-CSI](https://github.com/yunify/qingcloud-csi) 和 [Ceph-CSI](https://github.com/ceph/ceph-csi)。有关此类插件的更多信息,请参见 [Drivers](https://kubernetes-csi.github.io/docs/drivers.html)。 | ## 准备工作 -您需要一个拥有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并将它分配至一个帐户。 +您需要一个拥有**集群管理**权限的用户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并将它分配至一个用户。 ## 管理存储类型 1. 点击左上角的**平台管理**,然后选择**集群管理**。 -2. 如果您启用了[多集群功能](../../multicluster-management/)并导入了 Member 集群,可以选择一个特定集群。如果您未启用该功能,请直接参考下一步。 +2. 如果您启用了[多集群功能](../../multicluster-management/)并导入了成员集群,可以选择一个特定集群。如果您未启用该功能,请直接参考下一步。 -3. 在**集群管理**页面,您可以在**存储管理**下的**存储类型**中创建、更新和删除存储类型。 - - ![存储类型](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-class.PNG) +3. 在**集群管理**页面,您可以在**存储**下的**存储类型**中创建、更新和删除存储类型。 4. 要创建一个存储类型,请点击**创建**,在弹出窗口中输入基本信息。完成后,点击**下一步**。 -5. 
在 KubeSphere 中,您可以直接为 `QingCloud-CSI`、`Glusterfs` 和 `Ceph RBD` 创建存储类型。或者,您也可以根据需求为其他存储系统创建自定义存储类型。请选择一个类型,然后点击**下一步**。 - - ![存储系统](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-storage-system.PNG) - - ![存储类型设置](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-settings.PNG) +5. 在 KubeSphere 中,您可以直接为 `QingCloud-CSI`、`GlusterFS` 和 `Ceph RBD` 创建存储类型。或者,您也可以根据需求为其他存储系统创建自定义存储类型。请选择一个类型,然后点击**下一步**。 ### 常用设置 -有些设置在存储类型之间常用且共享。您可以在控制台上的仪表板属性中找到这些设置,StorageClass 清单文件中也通过字段或注解加以显示。您可以在右上角启用**编辑模式**,查看 YAML 格式的清单文件。下表是对 KubeSphere 中一些常用字段的属性说明。 +有些设置在存储类型之间常用且共享。您可以在控制台上的仪表板参数中找到这些设置,存储类型清单文件中也通过字段或注解加以显示。您可以在右上角点击**编辑 YAML**,查看 YAML 格式的配置文件。下表是对 KubeSphere 中一些常用字段的参数说明。 -| 属性 | 描述信息 | +| 参数 | 描述 | | :---- | :---- | -| 允许存储卷扩容 | 在清单文件中由 `allowVolumeExpansion` 指定。若设置为 `true`,PV 则被配置为可扩容。有关更多信息,请参见[允许卷扩展](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#允许卷扩展)。 | +| 存储卷扩容 | 在清单文件中由 `allowVolumeExpansion` 指定。若设置为 `true`,PV 则被配置为可扩容。有关更多信息,请参见[允许卷扩展](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#允许卷扩展)。 | | 回收机制 | 在清单文件中由 `reclaimPolicy` 指定。可设置为 `Delete` 或 `Retain`(默认)。有关更多信息,请参见[回收策略](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#回收策略)。 | | 存储系统 | 在清单文件中由 `provisioner` 指定。它决定使用什么存储卷插件来供应 PV。有关更多信息,请参见 [Provisioner](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#provisioner)。 | -| 支持的访问模式 | 在清单文件中由 `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` 指定。它会向 KubeSphere 表明支持的[访问模式](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#access-modes)。 | +| 访问模式 | 在清单文件中由 `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` 指定。它会向 KubeSphere 表明支持的[访问模式](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#access-modes)。 | +| 存储卷绑定模式 | 在清单文件中由 `volumeBindingMode` 
指定。它决定使用何种绑定模式。**延迟绑定**即存储卷创建后,当使用此存储卷的容器组被创建时,此存储卷绑定到一个存储卷实例。**立即绑定**即存储卷创建后,立即绑定到一个存储卷实例。 | 对于其他设置,您需要为不同的存储插件提供不同的信息,它们都显示在清单文件的 `parameters` 字段下。下面将进行详细说明,您也可以参考 Kubernetes 官方文档的[参数](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#参数)部分。 @@ -72,40 +67,40 @@ QingCloud CSI 是 Kubernetes 上的 CSI 插件,供青云QingCloud 存储服务 #### 设置 -![青云存储卷](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-volume-qingcloud.PNG) - -| 属性 | 描述信息 | +| 参数 | 描述信息 | | :---- | :---- | -| type | 在青云QingCloud 平台上,0 代表性能型硬盘,2 代表容量型硬盘,3 代表超高性能型硬盘,5 代表企业级分布式 SAN (NeonSAN) 型硬盘,100 代表基础型硬盘,200 代表企业型硬盘。 | -| maxSize | 存储卷容量上限。 | -| stepSize | 存储卷容量增量。 | -| minSize | 存储卷容量下限。 | -| fsType | 存储卷的文件系统类型:ext3、ext4(默认)、xfs。 | -| tags | QingCloud Tag 资源的 ID,用逗号隔开。 | +| 类型 | 在青云云平台中,0 代表性能型硬盘;2 代表容量型硬盘;3 代表超高性能型硬盘;5 代表企业级分布式 SAN(NeonSAN)型硬盘;100 代表基础型硬盘;200 代表 SSD 企业型硬盘。 | +| 容量上限 | 存储卷容量上限。 | +| 增量值 | 存储卷容量增量。 | +| 容量下限 | 存储卷容量下限。 | +| 文件系统类型 | 支持 ext3、ext4 和 XFS。默认类型为 ext4。 | +| 标签 | 为存储卷添加标签。使用半角逗号(,)分隔多个标签。 | 有关存储类型参数的更多信息,请参见 [QingCloud CSI 用户指南](https://github.com/yunify/qingcloud-csi/blob/master/docs/user-guide.md#set-storage-class)。 -### Glusterfs +### GlusterFS -Glusterfs 是 Kubernetes 上的一种树内存储插件,即您不需要额外安装存储卷插件。 +GlusterFS 是 Kubernetes 上的一种树内存储插件,即您不需要额外安装存储卷插件。 #### 准备工作 -已经安装 Glusterfs 存储系统。有关更多信息,请参见 [GlusterFS 安装文档](https://www.gluster.org/install/)。 +已经安装 GlusterFS 存储系统。有关更多信息,请参见 [GlusterFS 安装文档](https://www.gluster.org/install/)。 #### 设置 -| 属性 | 描述信息 | +| 参数 | 描述 | | :---- | :---- | -| resturl | Gluster REST 服务/Heketi 服务 URL,按需供应 Gluster 存储卷。 | -| clusterid | Heketi 在供应存储卷时使用的集群的 ID。 | -| restauthenabled | Gluster REST 服务认证 Boolean,对 REST 服务器进行认证。 | -| restuser | 在 Glusterfs 受信池中有权限创建存储卷的 Glusterfs REST 服务/Heketi 用户。 | -| secretNamespace, secretName | 识别 Secret 实例,包含与 Gluster REST 服务通信时使用的用户密码。 | -| gidMin, gidMax | StorageClass GID 范围的最大值和最小值。 | -| volumetype | 该可选值可以配置存储卷类型和其参数。 | +| REST URL | 供应存储卷的 Heketi REST URL,例如,<Heketi 服务集群 IP 
地址>:<Heketi 服务端口号>。 | +| 集群 ID | Gluster 集群 ID。 | +| 启用 REST 认证 | Gluster 启用对 REST 服务器的认证。 | +| REST 用户 | Gluster REST 服务或 Heketi 服务的用户名。 | +| 密钥所属项目 | Heketi 用户密钥的所属项目。 | +| 密钥名称 | Heketi 用户密钥的名称。 | +| GID 最小值 | 存储卷的 GID 最小值。 | +| GID 最大值 | 存储卷的 GID 最大值。 | +| 存储卷类型 | 存储卷的类型。该值可为 none,replicate:<副本数>,或 disperse:<数据>:<冗余数>。如果未设置该值,则默认存储卷类型为 replicate:3。 | -有关 StorageClass 参数的更多信息,请参见 [Kubernetes 文档中的 Glusterfs](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。 +有关存储类型参数的更多信息,请参见 [Kubernetes 文档中的 GlusterFS](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#glusterfs)。 ### Ceph RBD @@ -113,8 +108,6 @@ Ceph RBD 也是 Kubernetes 上的一种树内存储插件,即 Kubernetes 中 由于 **hyperkube** 镜像[自 1.17 版本开始已被弃用](https://github.com/kubernetes/kubernetes/pull/85094),树内 Ceph RBD 可能无法在不使用 **hyperkube** 的 Kubernetes 上运行。不过,您可以使用 [RBD Provisioner](https://github.com/kubernetes-retired/external-storage/tree/master/ceph/rbd) 作为替代,它的格式与树内 Ceph RBD 相同。唯一不同的参数是 `provisioner`(即 KubeSphere 控制台上的**存储系统**)。如果您想使用 RBD Provisioner,`provisioner` 的值必须为 `ceph.com/rbd`(在**存储系统**中输入该值,如下图所示)。如果您使用树内 Ceph RBD,该值必须为 `kubernetes.io/rbd`。 -![存储系统](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-system.PNG) - #### 准备工作 - 已经安装 Ceph 服务器。有关更多信息,请参见 [Ceph 安装文档](https://docs.ceph.com/en/latest/install/)。 @@ -122,21 +115,21 @@ Ceph RBD 也是 Kubernetes 上的一种树内存储插件,即 Kubernetes 中 #### 设置 -| 属性 | 描述信息 | +| 参数 | 描述 | | :---- | :---- | -| monitors| Ceph 监控器,用逗号隔开。 | -| adminId| 能够在该池中创建镜像的 Ceph 客户端 ID。 | -| adminSecretName| `adminId` 的 Secret 名称。 | -| adminSecretNamespace| `adminSecretName` 的命名空间。 | -| pool | Ceph RBD 池。 | -| userId | 用于映射 RBD 镜像的 Ceph 客户端 ID。 | -| userSecretName | `userId` 的 Ceph Secret 名称,用于映射 RBD 镜像。 | -| userSecretNamespace | `userSecretName` 的命名空间。 | -| fsType | Kubernetes 支持的文件系统类型。 | -| imageFormat | Ceph RBD 镜像格式,可设为 `1` 或 `2`。 | -| imageFeatures| 该参数为可选,仅在 `imageFormat` 设为 `2` 时使用。 | +| monitors| Ceph 集群 Monitors 的 IP 地址。 | +| 
adminId| Ceph 集群能够创建卷的用户 ID。 | +| adminSecretName| `adminId` 的密钥名称。 | +| adminSecretNamespace| `adminSecret` 所在的项目。 | +| pool | Ceph RBD 的 Pool 名称。 | +| userId | Ceph 集群能够挂载卷的用户 ID。 | +| userSecretName | `userId` 的密钥名称。 | +| userSecretNamespace | `userSecret` 所在的项目。 | +| 文件系统类型 | 存储卷的文件系统类型。 | +| imageFormat | Ceph 卷的选项。该值可为 `1` 或 `2`,选择 `2` 后需要填写 `imageFeatures`。 | +| imageFeatures| Ceph 集群的额外功能。仅当设置 `imageFormat` 为 `2` 时,才需要填写该值。 | -有关 StorageClass 参数的更多信息,请参见 [Kubernetes 文档中的 Ceph RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd)。 +有关存储类型参数的更多信息,请参见 [Kubernetes 文档中的 Ceph RBD](https://kubernetes.io/zh/docs/concepts/storage/storage-classes/#ceph-rbd)。 ### 自定义存储类型 @@ -148,7 +141,7 @@ NFS(网络文件系统)广泛用于带有 [NFS-Client](https://github.com/ku {{< notice note >}} -不建议您在生产环境中使用 NFS 存储(尤其是在 Kubernetes 1.20 或以上版本),这可能会引起 `failed to obtain lock` 和 `input/output error` 等问题,从而导致 Pod `CrashLoopBackOff`。此外,部分应用不兼容 NFS,例如 [Prometheus](https://github.com/prometheus/prometheus/blob/03b354d4d9386e4b3bfbcd45da4bb58b182051a5/docs/storage.md#operational-aspects) 等。 +不建议您在生产环境中使用 NFS 存储(尤其是在 Kubernetes 1.20 或以上版本),这可能会引起 `failed to obtain lock` 和 `input/output error` 等问题,从而导致容器组 `CrashLoopBackOff`。此外,部分应用不兼容 NFS,例如 [Prometheus](https://github.com/prometheus/prometheus/blob/03b354d4d9386e4b3bfbcd45da4bb58b182051a5/docs/storage.md#operational-aspects) 等。 {{}} @@ -159,14 +152,13 @@ NFS(网络文件系统)广泛用于带有 [NFS-Client](https://github.com/ku #### 常用设置 -![自定义存储类型](/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/custom-storage-class.PNG) - -| 属性 | 描述信息 | +| 参数 | 描述信息 | | :---- | :---- | +| 存储卷扩容 | 在清单文件中由 `allowVolumeExpansion` 指定。选择`否`。 | +| 回收机制 | 在清单文件中由 `reclaimPolicy` 指定。 | | 存储系统 | 在清单文件中由 `provisioner` 指定。如果您使用 [NFS-Client 的 Chart](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner) 来安装存储类型,可以设为 `cluster.local/nfs-client-nfs-client-provisioner`。 | -| 允许存储卷扩容 | 在清单文件中由 `allowVolumeExpansion` 指定。选择 
`No`。 | -| 回收机制 | 在清单文件中由 `reclaimPolicy` 指定。默认值为 Delete。 | -| 支持的访问模式 | 在清单文件中由 `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` 指定。默认 `ReadWriteOnce`、`ReadOnlyMany` 和 `ReadWriteMany` 全选。 | +| 访问模式 | 在清单文件中由 `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` 指定。默认 `ReadWriteOnce`、`ReadOnlyMany` 和 `ReadWriteMany` 全选。 | +| 存储卷绑定模式 | 在清单文件中由 `volumeBindingMode` 指定。它决定使用何种绑定模式。**延迟绑定**即存储卷创建后,当使用此存储卷的容器组被创建时,此存储卷绑定到一个存储卷实例。**立即绑定**即存储卷创建后,立即绑定到一个存储卷实例。 | #### 参数 @@ -174,6 +166,55 @@ NFS(网络文件系统)广泛用于带有 [NFS-Client](https://github.com/ku | :---- | :---- | :----| | archiveOnDelete | 删除时存档 PVC | `true` | +### 存储类型详情页 + +创建存储类型后,点击此存储类型的名称前往其详情页。在详情页点击**编辑 YAML** 来编辑此存储类型的清单文件,或点击**更多操作**并在下拉菜单中选择一项操作: + +- **设为默认存储类型**:将此存储类型设为集群的默认存储类型。一个 KubeSphere 集群中仅允许设置一个默认存储类型。 +- **存储卷管理**:管理存储卷功能,包括:**存储卷克隆**、**存储卷快照**、**存储卷扩容**。开启任意功能前,请联系系统管理员确认存储系统是否支持这些功能。 +- **删除**:删除此存储类型并返回上一页。 + +在**存储卷**页签上,查看与此存储类型相关联的存储卷。 + ## 管理存储卷 存储类型创建后,您可以使用它来创建存储卷。您可以在 KubeSphere 控制台上的**存储管理**下面的**存储卷**中列示、创建、更新和删除存储卷。有关更多详细信息,请参见[存储卷管理](../../project-user-guide/storage/volumes/)。 + +## 管理存储卷实例 + +KubeSphere 中的存储卷即 Kubernetes 中的[持久卷声明](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims),存储卷实例即 Kubernetes 中的[持久卷](https://kubernetes.io/zh/docs/concepts/storage/persistent-volumes/)。 + +### 存储卷实例列表页面 + +1. 以 `admin` 身份登录 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**,然后在左侧导航栏点击**存储**下的**存储卷**。 +2. 
在**存储卷**页面,点击**存储卷实例**页签,查看存储卷实例列表页面。该页面提供以下信息: + - **名称**:存储卷实例的名称,在该存储卷实例的清单文件中由 `.metadata.name` 字段指定。 + - **状态**:存储卷实例的当前状态,在该存储卷实例的清单文件中由 `.status.phase` 字段指定,包括: + - **可用**:存储卷实例可用,尚未绑定至存储卷。 + - **已绑定**:存储卷实例已绑定至存储卷。 + - **删除中**:正在删除存储卷实例。 + - **失败**:存储卷实例不可用。 + - **容量**:存储卷实例的容量,在该存储卷实例的清单文件中由 `.spec.capacity.storage` 字段指定。 + - **访问模式**:存储卷实例的访问模式,在该存储卷实例的清单文件中由 `.spec.accessModes` 字段指定,包括: + - **RWO**:存储卷实例可挂载为单个节点读写。 + - **ROX**:存储卷实例可挂载为多个节点只读。 + - **RWX**:存储卷实例可挂载为多个节点读写。 + - **回收策略**:存储卷实例的回收策略,在该存储卷实例的清单文件中由 `.spec.persistentVolumeReclaimPolicy` 字段指定,包括: + - **Retain**:删除存储卷后,保留该存储卷实例,需要手动回收。 + - **Delete**:删除该存储卷实例,同时从存储卷插件的基础设施中删除所关联的存储设备。 + - **Recycle**:清除存储卷实例上的数据,使该存储卷实例可供新的存储卷使用。 + - **创建时间**:存储卷实例的创建时间。 +3. 点击存储卷实例右侧的 并在下拉菜单中选择一项操作: + - **编辑**:编辑存储卷实例的 YAML 文件。 + - **查看 YAML**:查看存储卷实例的 YAML 文件。 + - **删除**:删除存储卷实例。处于**已绑定**状态的存储卷实例不可删除。 + +### 存储卷实例详情页面 + +1. 点击存储卷实例的名称,进入其详情页面。 +2. 在详情页面,点击**编辑信息**以编辑存储卷实例的基本信息。点击**更多操作**,在下拉菜单中选择一项操作: + - **查看 YAML**:查看存储卷实例的 YAML 文件。 + - **删除**:删除存储卷实例并返回列表页面。处于**已绑定**状态的存储卷实例不可删除。 +3. 点击**资源状态**页签,查看存储卷实例所绑定的存储卷。 +4. 点击**元数据**页签,查看存储卷实例的标签和注解。 +5. 点击**事件**页签,查看存储卷实例的事件。 diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md index 5f50dc9bb..3caf355b0 100644 --- a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-dingtalk.md @@ -3,7 +3,7 @@ title: "配置钉钉通知" keywords: 'KubeSphere, Kubernetes, 钉钉, 通知, 告警' description: '配置钉钉通知并添加会话或群机器人来接收告警通知消息。' linkTitle: "配置钉钉通知" -weight: 8722 +weight: 8723 --- 本教程演示如何配置钉钉通知并添加会话或群机器人来接收告警策略的通知。 @@ -22,50 +22,28 @@ weight: 8722 3. 
选择**小程序**,然后点击**创建应用**。 - ![create-app](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-app.png) - 4. 在弹出对话框中,填写**应用名称**和**应用描述**,本教程均输入`通知测试`作为示例,**开发方式**选择**企业自助开发**,然后点击**确定创建**。 - ![app-info](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-info.png) - 5. 创建应用后,在基础信息页面可以查看此应用的 **AppKey** 和 **AppSecret**。 - ![app-page](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-page.png) - 6. 在**开发管理**页面,点击**修改**。 - ![dev-mgt](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dev-mgt.png) - 7. 您需要在**服务器出口IP** 中输入所有节点的公网 IP,然后点击**保存**。 - ![enter-public-ip](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/enter-public-ip.png) - 8. 前往**权限管理**页面,在搜索框中搜索`根据手机号姓名获取成员信息的接口访问权限`,然后点击**申请权限**。 - ![userid-phone](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/userid-phone.png) - 9. 继续在搜索框中搜索`群会话`,勾选 **chat相关接口的管理权限**和 **chat相关接口的读取权限**,然后点击**批量申请(2)**。 - ![apply-for-access](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/apply-for-access.png) - 10. 在弹出对话框中,填写**联系人**、**联系方式**和**申请原因**,然后点击**申请**。待审核通过后,您需要在**权限管理**页面的**全部状态**中筛选**已开通**,点击**确定**,然后手动点击 **chat相关接口的管理权限**和 **chat相关接口的读取权限**右侧的**申请权限**。 - ![application-form](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/application-form.png) - - ![activate-chat-access](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/activate-chat-access.png) - ### 步骤 2:获取会话 ID 目前钉钉官方仅提供一种途径来获取会话 ID,即通过创建会话时的返回值来获取。如果您已知会话 ID,或者不需要通过会话接收通知消息,可跳过此步骤。 1. 
登录[钉钉 API Explorer](https://open-dev.dingtalk.com/apiExplorer#/?devType=org&api=dingtalk.oapi.gettoken),在**获取凭证**下的**获取企业凭证**页面,填写 **appkey** 和 **appsecret**,点击**发起调用**,即可在右侧获取 `access_token`。 - ![access-token](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/access-token.png) - 2. 在**通讯录管理**下的**用户管理**页面,选择**根据手机号获取userid**,**access_token** 已自动预先填写,在 **mobile** 中填写用户手机号,然后点击**发起调用**,即可在右侧获取 `userid`。 - ![user-id](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/user-id.png) - {{< notice note>}} 您只需获取群主的 userid,待创建会话后再在客户端添加群成员。 @@ -74,59 +52,56 @@ weight: 8722 3. 在**消息通知**下的**群消息**页面,选择**创建群会话**,**access_token** 已自动预先填写,在 **name**、**owner** 和 **useridlist** 中分别填写群名称(本教程使用 `test` 作为示例,您可以按需自行设置)、群主的 userid 和群成员的 userid,然后点击**发起调用**,即可在右侧获取 `chatid`。 - ![chat-id](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/chat-id.png) - ### 步骤 3:创建群机器人(可选) 如果您不需要通过群机器人接收通知消息,可跳过此步骤。 1. 登录钉钉电脑客户端,点击用户头像,选择**机器人管理**。 - ![click-robot](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-robot.png) - 2. 在弹出对话框中,选择**自定义**,然后点击**添加**。 - ![click-custom](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-custom.png) - - ![click-add](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-add.png) - 3. 在弹出对话框的**机器人名字**中输入名字(例如`告警通知`),在**添加到群组**中选择群组,在**安全设置**中设置**自定义关键词**和**加签**,勾选**我已阅读并同意《自定义机器人服务及免责条款》**,然后点击**完成**。 - ![create-robot](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-robot.png) - {{< notice note >}} 机器人创建完成后不可修改群组。 {{}} -4. 
您可以在**机器人管理**页面点击已创建机器人右侧的 ,查看机器人的具体设置信息,例如 **Webhook**、**自定义关键词**和**加签**。 - - ![view-robot](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/view-robot.png) - - ![robot-configs](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/robot-configs.png) +4. 您可以在**机器人管理**页面点击已创建机器人右侧的 ,查看机器人的具体设置信息,例如 **Webhook**、**自定义关键词**和**加签**。 ### 步骤 4:在 KubeSphere 控制台配置钉钉通知 您必须在 KubeSphere 控制台提供钉钉的通知设置,以便 KubeSphere 将通知发送至您的钉钉。 -1. 使用具有 `platform-admin` 角色的帐户(例如,`admin`)登录 KubeSphere Web 控制台。 +1. 使用具有 `platform-admin` 角色的用户(例如,`admin`)登录 KubeSphere Web 控制台。 2. 点击左上角的**平台管理**,选择**平台设置**。 -3. 前往**通知管理**下的**钉钉**。 +3. 前往**通知管理**下的**通知配置**,选择**钉钉**。 - ![dingtalk-configurations](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configurations.png) +4. 您可以在**会话设置**下的 **AppKey**、**AppSecret** 和**会话 ID** 中分别输入您的钉钉应用 AppKey、AppSecret、会话 ID,然后点击**添加**以添加会话 ID,您可以添加多个会话 ID。此外,您也可以在**群机器人设置**下的 **Webhook URL**、**关键词**和**密钥**中分别输入您的钉钉机器人 Webhook URL、关键词(输入关键词后请点击**添加**以添加关键词)、加签。操作完成后,点击**确定**。 -4. 您可以在**会话设置**下的 **AppKey**、**AppSecret** 和**会话 ID** 中分别输入您的钉钉应用 AppKey、AppSecret、会话 ID,然后点击**添加**以添加会话 ID,您可以添加多个会话 ID。此外,您也可以在**群机器人设置**下的 **Webhook 地址**、**关键字**和**密钥**中分别输入您的钉钉机器人 Webhook 地址、关键词(输入关键词后请点击**添加**以添加关键词)、加签。操作完成后,点击**保存**。 +5. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} - ![dingtalk-configured](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configured.png) + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 来删除通知条件。 -5. 开启**接收通知**并点击**更新**。 +6. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +7. 
在右上角,打开**未启用**开关来接收钉钉通知,或者关闭**已启用**开关来停用钉钉通知。 {{< notice note >}} - 如果您更改了现有配置,则必须点击**更新**以应用更改。 + - 通知条件设置后,接收人只会接收符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 {{}} diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-email.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-email.md index e7ac17207..876b47c31 100644 --- a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-email.md +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-email.md @@ -1,46 +1,64 @@ --- title: "配置邮件通知" keywords: 'KubeSphere, Kubernetes, 自定义, 平台' -description: '配置邮件服务器并添加接收人以接收告警策略、事件、审计等邮件通知。' +description: '配置邮件服务器并添加接收人以接收邮件通知。' linkTitle: "配置邮件通知" -weight: 8721 +weight: 8722 --- 本教程演示如何配置邮件通知及添加接收人,以便接收告警策略的邮件通知。 ## 配置邮件服务器 -1. 使用具有 `platform-admin` 角色的帐户登录 Web 控制台。 +1. 使用具有 `platform-admin` 角色的用户登录 Web 控制台。 2. 点击左上角的**平台管理**,选择**平台设置**。 -3. 导航至**通知管理**下的**邮件**。 +3. 导航至**通知管理**下的**通知配置**,选择**邮件**。 - ![email-server](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/email-server.png) - -4. 在**服务器配置**下,填写以下字段配置邮件服务器。 +4. 在**服务器设置**下,填写以下字段配置邮件服务器。 - **SMTP 服务器地址**:能够提供邮件服务的 SMTP 服务器地址。端口通常是 `25`。 - **使用 SSL 安全连接**:SSL 可以用于加密邮件,从而提高通过邮件传输的信息的安全性。通常来说,您必须为邮件服务器配置证书。 - - **SMTP 用户**:SMTP 帐户。 - - **SMTP 密码**:SMTP 帐户密码。 - - **发件人邮箱**:发件人的邮箱地址。目前不支持自定义邮箱地址。 + - **SMTP 用户名**:SMTP 用户的名称。 + - **SMTP 密码**:SMTP 帐户的密码。 + - **发件人邮箱**:发件人的邮箱地址。 -5. 点击**保存**。 +5. 点击**确定**。 -## 添加接收人 +## 接收设置 + +### 添加接收人 1. 在**接收设置**下,输入接收人的邮箱地址,点击**添加**。 -2. 添加完成后,接收人的邮箱地址将在**接收设置**下列出。您最多可以添加 50 位接收人,所有接收人都将能收到告警通知。 +2. 添加完成后,接收人的邮箱地址将在**接收设置**下列出。您最多可以添加 50 位接收人,所有接收人都将能收到通知。 -3. 若想移除接收人,请将鼠标悬停在想要移除的邮箱地址上,然后点击右侧出现的垃圾桶图标。 +3. 若想移除接收人,请将鼠标悬停在想要移除的邮箱地址上,然后点击右侧的 。 -4. 若要确保将通知发送至接收人,请开启**接收通知**并点击**更新**。 +### 设置通知条件 + +1. 
勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + +2. 您可以点击**添加**来添加多个通知条件。 + +3. 您可以点击通知条件右侧的 来删除通知条件。 + +4. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +5. 在右上角,打开**未启用**开关来接收邮件通知,或者关闭**已启用**开关来停用邮件通知。 {{< notice note >}} - - 如果您更改了现有配置,则必须点击**更新**以应用更改。 + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 {{}} @@ -48,10 +66,6 @@ weight: 8721 配置邮件通知并添加接收人后,您需要启用 [KubeSphere 告警](../../../../pluggable-components/alerting/),并为工作负载或节点创建告警策略。告警触发后,所有接收人都将能收到邮件通知。 -以下图片为邮件通知的示例: - -![example-email-notification](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png) - {{< notice note >}} - 如果您更新了邮件服务器配置,KubeSphere 将根据最新配置发送邮件通知。 diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack.md index c666b27cb..eaa26802b 100644 --- a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack.md +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-slack.md @@ -3,7 +3,7 @@ title: "配置 Slack 通知" keywords: 'KubeSphere, Kubernetes, Slack, 通知' description: '配置 Slack 通知及添加频道来接收告警策略、事件、审计等通知。' linkTitle: "配置 Slack 通知" -weight: 8724 +weight: 8725 --- 本教程演示如何配置 Slack 通知及添加频道,以便接收告警策略的通知。 @@ -24,29 +24,23 @@ weight: 8724 4. 在左侧导航栏中,选择 **Features** 下的 **OAuth & Permissions**。在 **Auth & Permissions** 页面,下滑到 **Scopes**,分别点击 **Bot Token Scopes** 和 **User Token Scopes** 下的 **Add an OAuth Scope**,两者都选择 **chart:write** 权限。 - ![slack-scope](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png) - 5. 
上滑到 **OAuth Tokens & Redirect URLs**,点击 **Install to Workspace**。授予该应用访问您工作区的权限,您可以在 **OAuth Tokens for Your Team** 下看到已创建的令牌。 - ![oauth-token](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png) - ## 在 KubeSphere 控制台上配置 Slack 通知 您必须在 KubeSphere 控制台提供 Slack 令牌用于认证,以便 KubeSphere 将通知发送至您的频道。 -1. 使用具有 `platform-admin` 角色的帐户登录 Web 控制台。 +1. 使用具有 `platform-admin` 角色的用户登录 Web 控制台。 2. 点击左上角的**平台管理**,选择**平台设置**。 -3. 导航到**通知管理**下的 **Slack**。 +3. 导航到**通知管理**下的**通知配置**,选择 **Slack**。 - ![slack-notification](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png) - -4. 对于**服务器配置**下的 **Slack Token**,您可以选择使用 User OAuth Token 或者 Bot User OAuth Token 进行认证。如果使用 User OAuth Token,将由应用所有者往您的 Slack 频道发送通知;如果使用 Bot User OAuth Token,将由应用发送通知。 +4. 对于**服务器设置**下的 **Slack 令牌**,您可以选择使用 User OAuth Token 或者 Bot User OAuth Token 进行认证。如果使用 User OAuth Token,将由应用所有者往您的 Slack 频道发送通知;如果使用 Bot User OAuth Token,将由应用发送通知。 5. 在**接收频道设置**下,输入您想要收取通知的频道,点击**添加**。 -6. 添加完成后,该频道将在**已设置频道**下列出。您最多可以添加 20 个频道,所有已添加的频道都将能够收到告警通知。 +6. 添加完成后,该频道将在**已添加的频道**下列出。您最多可以添加 20 个频道,所有已添加的频道都将能够收到告警通知。 {{< notice note >}} @@ -54,28 +48,37 @@ weight: 8724 {{}} -7. 点击**保存**。 - -8. 若想确保通知将会发送到 Slack 频道,请开启**接收通知**并点击**更新**。 +7. 点击**确定**。 +8. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} - 如果您更改了现有配置,则必须点击**更新**以应用更改。 + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 来删除通知条件。 + +9. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +10. 在右上角,打开**未启用**开关来接收 Slack 通知,或者关闭**已启用**开关来停用 Slack 通知。 + + {{< notice note >}} + + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 {{}} -9. 
若想由应用发送通知,请确保将其加入频道。请在 Slack 频道中输入 `/invite @` 将应用加入至该频道。 - - ![add-app](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png) +11. 若想由应用发送通知,请确保将其加入频道。请在 Slack 频道中输入 `/invite @<应用名称>` 将应用加入至该频道。 ## 接收 Slack 通知 配置 Slack 通知并添加频道后,您需要启用 [KubeSphere 告警](../../../../pluggable-components/alerting/),并为工作负载或节点创建告警策略。告警触发后,列表中的全部频道都将能接收通知。 -以下图片为 Slack 通知的示例: - -![example-notification](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/example-notification1.png) {{< notice note >}} - 如果您更新了 Slack 通知配置,KubeSphere 将根据最新配置发送通知。 diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md index e1b2289f0..e4f41aab9 100644 --- a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-webhook.md @@ -1,10 +1,63 @@ --- -title: "Configure Webhook" -keywords: 'KubeSphere, Kubernetes, custom, platform' -description: '' -linkTitle: "Configure Webhook" -weight: 8725 +title: "配置 Webhook 通知" +keywords: 'KubeSphere, Kubernetes, 自定义, 平台, Webhook' +description: '配置 webhook 服务器以通过 webhook 接收平台通知。' +linkTitle: "配置 Webhook 通知" +weight: 8726 --- -TBD +Webhook 是应用程序发送由特定事件触发的通知的一种方式,可以实时向其他应用程序发送信息,使用户可以立即接收通知。 +本教程介绍如何配置 Webhook 服务器以接收平台通知。 + +## 准备工作 + +您需要准备一个被授予 `platform-admin` 角色的用户。有关详细信息,请参阅[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 配置 Webhook 服务器 + +1. 以 `platform-admin` 用户身份登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**平台设置**。 + +3. 在左侧导航栏中,点击**通知管理**下的**通知配置**,选择 **Webhook**。 + +4. 
在 **Webhook** 标签页,设置如下参数: + + - **Webhook URL**:Webhook 服务器的 URL。 + + - **认证类型**:Webhook 身份认证方法。 + - **无需认证**:无身份认证,所有通知都可以发送到该 URL。 + - **Bearer 令牌**:使用令牌进行身份认证。 + - **基础认证**:使用用户名和密码进行身份认证。 + + {{< notice note>}}目前,KubeSphere 不支持 TLS 连接(HTTPS)。如果您使用 HTTPS URL,则需要选择**跳过 TLS 认证(不安全)**。 + + {{}} + +5. 勾选**通知条件**左侧的复选框,设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。可以选择标签,也可以自定义标签。 + - **操作符**:标签和值之间的映射。操作符包括**包含值**、**不包含值**、**存在**和**不存在**。 + - **值**:与标签关联的值。 + {{< notice note >}} + + - 操作符**包含值**和**不包含值**需要一个或多个标签值。使用回车符来分隔标签值。 + - 操作符**存在**和**不存在**判断标签是否存在,不需要标签值。 + + {{}} + +6. 点击**添加**来添加通知条件,也可以点击通知条件右侧的 来删除条件。 + +7. 配置完成后,可以点击**发送测试信息**进行验证。 + +8. 在右上角,可以打开**未开启**开关以启用通知,或关闭**已开启**开关以禁用通知。 + +9. 完成后点击**确定**。 + + {{< notice note >}} + + - 设置通知条件后,接收方只会收到满足条件的通知。 + - 如果更改现有配置,则必须点击**确定**才能应用修改后的配置。 + + {{}} diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md index ff830ed38..caf745ef7 100644 --- a/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/configure-wecom.md @@ -3,7 +3,7 @@ title: "配置企业微信通知" keywords: 'KubeSphere, Kubernetes, 企业微信, 通知, 告警' description: '配置企业微信通知并添加相应 ID 来接收告警通知消息。' linkTitle: "配置企业微信通知" -weight: 8723 +weight: 8724 --- 本教程演示如何配置企业微信通知并添加相应 ID 来接收告警策略的通知。 @@ -20,12 +20,8 @@ weight: 8723 2. 在**应用管理**页面,点击**自建**下的**创建应用**。 - ![click-create-app](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/click-create-app.png) - 3. 
在**创建应用**页面,上传应用 Logo、输入应用名称(例如,`通知测试`),点击**选择部门 / 成员**编辑**可见范围**,然后点击**创建应用**。 - ![create-app](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-app.png) - {{< notice note >}} 请确保将需要接收通知的用户、部门或标签加入可见范围中。 @@ -34,69 +30,58 @@ weight: 8723 4. 应用创建完成后即可查看其详情页面,**AgentId** 右侧显示该应用的 ID。点击 **Secret** 右侧的**查看**,然后在弹出对话框中点击**发送**,便可以在企业微信客户端查看 Secret。此外,您还可以点击**编辑**来编辑可见范围。 - ![app-detail](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-detail.png) - - ![view-secret](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/view-secret.png) - ### 步骤 2:创建部门或标签 1. 在**通讯录**页面的**组织架构**选项卡下,点击**测试**(本教程使用`测试`部门作为示例)右侧的 ,然后选择**添加子部门**。 - ![add-dept](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-dept.png) - 2. 在弹出对话框中,输入部门名称(例如`测试二组`),然后点击**确定**。 - ![enter-dept-name](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/enter-dept-name.png) - 3. 创建部门后,您可以点击右侧的**添加成员**、**批量导入**或**从其他部门移入**来添加成员。添加成员后,点击该成员进入详情页面,查看其帐号。 - ![add-member](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member.png) - - ![member-account](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/member-account.png) - 4. 您可以点击`测试二组`右侧的 来查看其部门 ID。 - ![dept-id](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/dept-id.png) - 5. 点击**标签**选项卡,然后点击**添加标签**来创建标签。若管理界面无**标签**选项卡,请点击加号图标来创建标签。 - ![add-tag](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-tag.png) - 6. 在弹出对话框中,输入标签名称,例如`组长`。您可以按需指定**可使用人**,点击**确定**完成操作。 - ![create-tag](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-tag.png) - 7. 
创建标签后,您可以点击右侧的**添加部门/成员**或**批量导入**来添加部门或成员。点击**标签详情**进入详情页面,可以查看此标签的 ID。 - ![add-member-to-tag](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member-to-tag.png) - - ![tag-id](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/tag-id.png) - 8. 要查看企业 ID,请点击**我的企业**,在**企业信息**页面查看 ID。 - ![company-id](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/company-id.png) - ### 步骤 3:在 KubeSphere 控制台配置企业微信通知 您必须在 KubeSphere 控制台提供企业微信的相关 ID 和凭证,以便 KubeSphere 将通知发送至您的企业微信。 -1. 使用具有 `platform-admin` 角色的帐户(例如,`admin`)登录 KubeSphere Web 控制台。 +1. 使用具有 `platform-admin` 角色的用户(例如,`admin`)登录 KubeSphere Web 控制台。 2. 点击左上角的**平台管理**,选择**平台设置**。 -3. 前往**通知管理**下的**企业微信**。 +3. 前往**通知管理**下的**通知配置**,选择**企业微信**。 - ![platform](/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/platform.png) - -4. 在**服务器配置**下的**企业 ID**、**企业应用 ID** 以及**企业应用凭证**中分别输入您的企业 ID、应用 AgentId 以及应用 Secret。 +4. 在**服务器设置**下的**企业 ID**、**应用 AgentId** 以及**应用 Secret** 中分别输入您的企业 ID、应用 AgentId 以及应用 Secret。 5. 在**接收设置**中,从下拉列表中选择**用户 ID**、**部门 ID** 或者**标签 ID**,输入对应 ID 后点击**添加**。您可以添加多个 ID。 -6. 点击**保存**,然后开启**接收通知**并点击**更新**。 +6. 勾选**通知条件**左侧的复选框即可设置通知条件。 + + - **标签**:告警策略的名称、级别或监控目标。您可以选择一个标签或者自定义标签。 + - **操作符**:标签与值的匹配关系,包括**包含值**,**不包含值**,**存在**和**不存在**。 + - **值**:标签对应的值。 + {{< notice note >}} + - 操作符**包含值**和**不包含值**需要添加一个或多个标签值。使用回车分隔多个值。 + - 操作符**存在**和**不存在**判断某个标签是否存在,无需设置标签值。 + {{}} + + 您可以点击**添加**来添加多个通知条件,或点击通知条件右侧的 来删除通知条件。 + +7. 配置完成后,您可以点击右下角的**发送测试信息**进行验证。 + +8. 
在右上角,打开**未启用**开关来接收企业微信通知,或者关闭**已启用**开关来停用企业微信通知。 {{< notice note >}} - 如果您更改了现有配置,则必须点击**更新**以应用更改。 + - 通知条件设置后,接收人只会接受符合条件的通知。 + - 如果您更改了现有配置,则必须点击**确定**以应用更改。 {{}} diff --git a/content/zh/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md b/content/zh/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md new file mode 100644 index 000000000..0d6079b90 --- /dev/null +++ b/content/zh/docs/cluster-administration/platform-settings/notification-management/customize-cluster-name.md @@ -0,0 +1,40 @@ +--- +title: "自定义通知消息中的集群名称" +keywords: 'KubeSphere, Kubernetes, 平台, 通知' +description: '了解如何自定义 KubeSphere 发送的通知消息中的集群名称。' +linkTitle: "自定义通知消息中的集群名称" +weight: 8721 +--- + +本文档说明如何自定义 KubeSphere 发送的通知消息中的集群名称。 + +## 准备工作 + +您需要有一个具有 `platform-admin` 角色的用户,例如 `admin` 用户。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 + +## 自定义通知消息中的集群名称 + +1. 以 `admin` 用户登录 KubeSphere 控制台。 + +2. 点击右下角的 并选择 **Kubectl**。 + +3. 在弹出的对话框中,执行以下命令: + + ```bash + kubectl edit nm notification-manager + ``` + +4. 在 `.spec.receiver.options.global` 下方添加 `cluster` 字段以自定义您的集群名称: + + ```yaml + spec: + receivers: + options: + global: + cluster: <集群名称> + ``` + +5. 
完成操作后,请保存更改。 + + + diff --git a/content/zh/docs/devops-user-guide/_index.md b/content/zh/docs/devops-user-guide/_index.md index bcefac6e9..1dab4af23 100644 --- a/content/zh/docs/devops-user-guide/_index.md +++ b/content/zh/docs/devops-user-guide/_index.md @@ -1,6 +1,6 @@ --- title: "DevOps 用户指南" -description: "开始使用 KubeSphere DevOps 工程" +description: "开始使用 KubeSphere DevOps 项目" layout: "second" linkTitle: "DevOps 用户指南" @@ -9,6 +9,6 @@ weight: 11000 icon: "/images/docs/docs.svg" --- -您可以使用 KubeSphere DevOps 系统在 Kubernetes 集群上部署和管理 CI/CD 任务以及相关的工作负载。本章演示如何在 DevOps 工程中进行管理和操作,包括运行流水线、创建凭证和集成工具等等。 +您可以使用 KubeSphere DevOps 系统在 Kubernetes 集群上部署和管理 CI/CD 任务以及相关的工作负载。本章演示如何在 DevOps 项目中进行管理和操作,包括运行流水线、创建凭证和集成工具等等。 您安装 DevOps 组件时,会自动部署 Jenkins。您可以在 KubeSphere 中像以前一样通过 Jenkinsfile 构建流水线,保持一致的用户体验。此外,KubeSphere 还提供图形编辑面板,可以将整个流程可视化,为您直观地呈现流水线在每个阶段的运行状态。 diff --git a/content/zh/docs/devops-user-guide/examples/a-maven-project.md b/content/zh/docs/devops-user-guide/examples/a-maven-project.md index 20fff098c..2c411513b 100644 --- a/content/zh/docs/devops-user-guide/examples/a-maven-project.md +++ b/content/zh/docs/devops-user-guide/examples/a-maven-project.md @@ -1,8 +1,8 @@ --- -title: "构建和部署 Maven 工程" +title: "构建和部署 Maven 项目" keywords: 'Kubernetes, Docker, DevOps, Jenkins, Maven' -description: '学习如何使用 KubeSphere 流水线构建并部署 Maven 工程。' -linkTitle: "构建和部署 Maven 工程" +description: '学习如何使用 KubeSphere 流水线构建并部署 Maven 项目。' +linkTitle: "构建和部署 Maven 项目" weight: 11430 --- @@ -10,11 +10,11 @@ weight: 11430 - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 - 您需要有一个 [Docker Hub](http://www.dockerhub.com/) 帐户。 -- 您需要创建一个企业空间、一个 DevOps 工程和一个用户帐户,并需要邀请该帐户至 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户,并需要邀请该用户至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 -## Maven 工程的工作流 +## Maven 项目的工作流 -KubeSphere DevOps 中有针对 Maven 
工程的工作流,如下图所示,它使用 Jenkins 流水线来构建和部署 Maven 工程。所有步骤均在流水线中进行定义。 +KubeSphere DevOps 中有针对 Maven 项目的工作流,如下图所示,它使用 Jenkins 流水线来构建和部署 Maven 项目。所有步骤均在流水线中进行定义。 ![maven-project-jenkins](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/maven-project-jenkins.png) @@ -54,46 +54,38 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml ## Maven 流水线示例 -### Maven 工程准备工作 +### Maven 项目准备工作 -- 确保您在开发设备上成功构建 Maven 工程。 -- 添加 Dockerfile 至工程仓库以构建镜像。有关更多信息,请参考 。 -- 添加 YAML 文件至工程仓库以部署工作负载。有关更多信息,请参考 。如果有多个不同环境,您需要准备多个部署文件。 +- 确保您在开发设备上成功构建 Maven 项目。 +- 添加 Dockerfile 至项目仓库以构建镜像。有关更多信息,请参考 。 +- 添加 YAML 文件至项目仓库以部署工作负载。有关更多信息,请参考 。如果有多个不同环境,您需要准备多个部署文件。 ### 创建凭证 | 凭证 ID | 类型 | 用途 | | --------------- | ---------- | --------------------- | -| dockerhub-id | 帐户凭证 | 仓库,例如 Docker Hub | +| dockerhub-id | 用户名和密码 | 仓库,例如 Docker Hub | | demo-kubeconfig | kubeconfig | 部署工作负载 | 有关详细信息,请参考[凭证管理](../../how-to-use/credential-management/)。 -![查看凭证列表](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-credential_lists.png) - ### 为工作负载创建一个项目 在本示例中,所有工作负载都部署在 `kubesphere-sample-dev` 项目中。您必须事先创建 `kubesphere-sample-dev` 项目。 -![查看项目](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view_namespace.png) +### 为 Maven 项目创建一个流水线 -### 为 Maven 工程创建一个流水线 - -1. 在您的 DevOps 工程中,转到**流水线**页面并点击**创建**,创建一个名为 `maven` 的流水线。有关更多信息,请参见[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel)。 +1. 在您的 DevOps 项目中,转到**流水线**页面并点击**创建**,创建一个名为 `maven` 的流水线。有关更多信息,请参见[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel)。 2. 转到该流水线的详情页面,点击**编辑 Jenkinsfile**。 - ![编辑 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/edit_jenkinsfile.png) - 3. 
复制粘贴以下内容至弹出窗口。您必须将 `DOCKERHUB_NAMESPACE` 的值替换为您自己的值,编辑完成后点击**确定**保存 Jenkinsfile。 ```groovy pipeline { - agent { - node { - label 'maven' + agent { + label 'maven' } - } parameters { string(name:'TAG_NAME',defaultValue: '',description:'') @@ -105,21 +97,23 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml REGISTRY = 'docker.io' // 需要更改为您自己的 Docker Hub Namespace DOCKERHUB_NAMESPACE = 'Docker Hub Namespace' - APP_NAME = 'devops-java-sample' + APP_NAME = 'devops-maven-sample' BRANCH_NAME = 'dev' + PROJECT_NAME = 'kubesphere-sample-dev' } stages { stage ('checkout scm') { steps { - git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git" + // 下方所用的 GitHub 仓库仅用作体验功能的示例,请避免向该仓库提交包含测试性改动的 PR + git branch: 'master', url: "https://github.com/kubesphere/devops-maven-sample.git" } } stage ('unit test') { steps { container ('maven') { - sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test' + sh 'mvn clean test' } } } @@ -127,7 +121,7 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml stage ('build & push') { steps { container ('maven') { - sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package' + sh 'mvn -Dmaven.test.skip=true clean package' sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' 
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' @@ -138,9 +132,17 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml } stage('deploy to dev') { - steps { - kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID") - } + steps { + container ('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } + } } } } @@ -148,22 +150,13 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml 4. 您可以看到图形编辑面板上已自动创建阶段和步骤。 - ![查看 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-edit_jenkinsfile.png) - ### 运行和测试 1. 点击**运行**并在弹出对话框的 **TAG_NAME** 中输入 `v1`,然后点击**确定**运行流水线。 - ![运行 Maven 流水线](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/run-maven_pipeline.png) - -2. 待流水线成功运行,您可以前往**活动**选项卡查看其详情。 - - ![查看流水线运行结果](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_pipeline.png) +2. 待流水线成功运行,您可以前往**运行记录**选项卡查看其详情。 3. 在 `kubesphere-sample-dev` 项目中,已创建新的工作负载。 - ![查看工作负载](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_workload.png) +4. 在**服务**页面,查看服务 (Service) 的外部访问信息。 -4. 
您可以查看服务 (Service) 的访问地址,如下所示。 - - ![查看服务](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload_svc.png) diff --git a/content/zh/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md b/content/zh/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md index c67330250..9f33a3b1e 100644 --- a/content/zh/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md +++ b/content/zh/docs/devops-user-guide/examples/create-multi-cluster-pipeline.md @@ -12,11 +12,11 @@ weight: 11440 ## 准备工作 -- 准备三个已安装 KubeSphere 的 Kubernetes 集群,选择一个集群作为 Host 集群,其余两个作为 Member 集群。更多关于集群角色与如何在 KubeSphere 上启用多集群环境,请参见[多集群管理](../../../multicluster-management/)。 -- 将 Member 集群设置为[公开集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#将集群设置为公开集群)。或者,您可以[在工作空间创建之后设置集群可见性](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#在创建企业空间后设置集群可见性)。 -- 在 Host 集群上[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 +- 准备三个已安装 KubeSphere 的 Kubernetes 集群,选择一个集群作为主集群,其余两个作为成员集群。更多关于集群角色与如何在 KubeSphere 上启用多集群环境,请参见[多集群管理](../../../multicluster-management/)。 +- 将成员集群设置为[公开集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#将集群设置为公开集群)。或者,您可以[在创建企业空间之后设置集群可见性](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/#在创建企业空间后设置集群可见性)。 +- 在主集群上[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 - 整合 SonarQube 进入流水线。有关更多信息,请参见[将 SonarQube 集成到流水线](../../how-to-integrate/sonarqube/)。 -- 在 Host 集群创建四个帐户: `ws-manager`、`ws-admin`、`project-admin` 和 `project-regular`,然后授予他们不同的角色。有关详细信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 +- 在主集群创建四个帐户: `ws-manager`、`ws-admin`、`project-admin` 和 `project-regular`,然后授予他们不同的角色。有关详细信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 ## 工作流程概述 @@ 
-34,53 +34,43 @@ weight: 11440

| 集群名称 | 集群角色 | 用途 |
| -------- | ----------- | ---- |
-| host | Host 集群 | 测试 |
-| shire | Member 集群 | 生产 |
-| rohan | Member 集群 | 开发 |
+| host | 主集群 | 测试 |
+| shire | 成员集群 | 生产 |
+| rohan | 成员集群 | 开发 |

{{< notice note >}}

-这些 Kubernetes 集群可以被托管至不同的云厂商,也可以使用不同的 Kubernetes 版本。针对 KubeSphere v3.1.0 推荐的 Kubernetes 版本:v1.17.9、v1.18.8、v1.19.8 和 v1.20.4。
+这些 Kubernetes 集群可以被托管至不同的云厂商,也可以使用不同的 Kubernetes 版本。针对 KubeSphere 3.2.1 推荐的 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 和 v1.22.x(实验性支持)。

{{}}

### 步骤 2:创建企业空间

-1. 使用 `ws-manager` 帐户登录 Host 集群的 Web 控制台。在**企业空间**页面中,点击**创建**。
+1. 使用 `ws-manager` 帐户登录主集群的 Web 控制台。在**企业空间**页面中,点击**创建**。

2. 在**基本信息**页面中,将企业空间命名为 `devops-multicluster`,选择 `ws-admin` 为**管理员**,然后点击**下一步**。

- ![create-workspace](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-workspace.png)
+3. 在**集群设置**页面,选择所有集群(总共三个集群),然后点击**创建**。

-3. 在**集群选择**页面,选择所有集群(总共三个集群),然后点击**创建**。
+4. 创建的企业空间会显示在列表。您需要登出控制台并以 `ws-admin` 身份重新登录,以邀请 `project-admin` 与 `project-regular` 至企业空间,然后分别授予他们 `workspace-self-provisioner` 和 `workspace-viewer` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace)。

- ![select-all-clusters](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/select-all-clusters.png)
+### 步骤 3:创建 DevOps 项目

-4. 创建的企业空间会显示在列表。您需要登出控制台并以 `ws-admin` 身份重新登录,以邀请 `project-admin` 与 `project-regular` 至企业空间,然后分别授予他们 `work-space-self-provisioner` 和 `workspace-viwer` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-2-create-a-workspace)。
-
- ![workspace-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/workspace-created.png)
-
-### 步骤 3:创建 DevOps 工程
-
-1. 您需要登出控制台,并以 `project-admin` 身份重新登录。转到 **DevOps 工程**页面并点击**创建**。
+1. 您需要登出控制台,并以 `project-admin` 身份重新登录。转到 **DevOps 项目**页面并点击**创建**。

2. 
在出现的对话框中,输入 `mulicluster-demo` 作为**名称**,在**集群设置**中选择 **host**,然后点击**确定**。 - ![devops-project](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project.png) - {{< notice note >}} 下拉列表中只有启用 DevOps 组件的集群可用。 {{}} -3. 创建的 DevOps 工程将显示在列表中。请确保邀请帐户 `project-regular` 至这个项目,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 - - ![devops-project-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project-created.png) +3. 创建的 DevOps 项目将显示在列表中。请确保邀请用户 `project-regular` 至这个项目,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 ### 步骤 4:在集群上创建项目 -提前创建如下表所示的项目。请确保邀请 `project-regular` 帐户到这些项目中,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 +提前创建如下表所示的项目。请确保邀请 `project-regular` 用户到这些项目中,并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 | 集群名 | 用途 | 项目名 | | ------ | ---- | ---------------------- | @@ -90,41 +80,35 @@ weight: 11440 ### 步骤 5:创建凭证 -1. 登出控制台,以 `project-regular` 身份重新登录。在 **DevOps 工程**页面,点击 DevOps 工程 `multicluster-demo`。 +1. 登出控制台,以 `project-regular` 身份重新登录。在 **DevOps 项目**页面,点击 DevOps 项目 `multicluster-demo`。 -2. 在 DevOps 凭证页面,您需要创建如下表所示的凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../how-to-use/credential-management/#create-credentials)和[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials)。 +2. 
在**凭证**页面,您需要创建如下表所示的凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../how-to-use/credential-management/#create-credentials)和[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile/#step-1-create-credentials)。 | 凭证 ID | 类型 | 应用场所 | | ------------ | ---------- | -------------------- | -| host | kubeconfig | 用于 Host 集群测试 | -| shire | kubeconfig | 用于 Member 集群生产 | -| rohan | kubeconfig | 用于 Member 集群开发 | +| host | kubeconfig | 用于主集群测试 | +| shire | kubeconfig | 用于成员集群生产 | +| rohan | kubeconfig | 用于成员集群开发 | | dockerhub-id | 帐户凭证 | Docker Hub | | sonar-token | 秘密文本 | SonarQube | {{< notice note >}} -在创建 kubeconfig 凭证 `shire` 和 `rohan` 时,必须手动输入 Member 集群的 kubeconfig。确保 Host 集群可以访问 Member 集群的 APIServer 地址。 +在创建 kubeconfig 凭证 `shire` 和 `rohan` 时,必须手动输入成员集群的 kubeconfig。确保主集群可以访问成员集群的 API Server 地址。 {{}} -3. 您会拥有五个凭证。 - -![credentials-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/credentials-created.png) +3. 共创建五个凭证。 ### 步骤 6:创建流水线 1. 在**流水线**页面点击**创建**。在显示的对话框中,输入 `build-and-deploy-application` 作为**名称**然后点击**下一步**。 - ![pipeline-name](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-name.png) - 2. 在**高级设置中**选项卡中,点击**创建**即使用默认配置。 -3. 列表会展示被创建的流水线,点击流水线进入详细页面。 +3. 列表会展示被创建的流水线,点击流水线名称进入详情页面。 - ![pipeline-created](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-created.png) - -4. 点击**编辑 Jenkinsfile**,复制和粘贴以下内容。请确保将 DOCKERHUB_NAMESPACE 的值替换为您自己的值,然后点击**确认**。 +4. 
点击**编辑 Jenkinsfile**,复制和粘贴以下内容。请确保将 DOCKERHUB_NAMESPACE 的值替换为您自己的值,然后点击**确定**。 ```groovy pipeline { @@ -145,7 +129,7 @@ weight: 11440 REGISTRY = 'docker.io' DOCKERHUB_NAMESPACE = 'your Docker Hub account ID' - APP_NAME = 'devops-java-sample' + APP_NAME = 'devops-maven-sample' SONAR_CREDENTIAL_ID = 'sonar-token' TAG_NAME = "SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER" } @@ -153,16 +137,15 @@ weight: 11440 stage('checkout') { steps { container('maven') { - git branch: 'master', url: 'https://github.com/kubesphere/devops-java-sample.git' + git branch: 'master', url: 'https://github.com/kubesphere/devops-maven-sample.git' } } } stage('unit test') { steps { container('maven') { - sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test' + sh 'mvn clean test' } - } } stage('sonarqube analysis') { @@ -170,26 +153,22 @@ weight: 11440 container('maven') { withCredentials([string(credentialsId: "$SONAR_CREDENTIAL_ID", variable: 'SONAR_TOKEN')]) { withSonarQubeEnv('sonar') { - sh "mvn sonar:sonar -o -gs `pwd`/configuration/settings.xml -Dsonar.login=$SONAR_TOKEN" + sh "mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN" } - } } - } } stage('build & push') { steps { container('maven') { - sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package' + sh 'mvn -Dmaven.test.skip=true clean package' sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .' 
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) { sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin' sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER' } - } - } } stage('push latest') { @@ -198,29 +177,51 @@ weight: 11440 sh 'docker tag $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:latest ' } - } } stage('deploy to dev') { steps { - kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$DEV_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + withCredentials([ + kubeconfigFile( + credentialsId: env.DEV_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/dev-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } stage('deploy to staging') { steps { - input(id: 'deploy-to-staging', message: 'deploy to staging?') - kubernetesDeploy(configs: 'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$TEST_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + input(id: 'deploy-to-staging', message: 'deploy to staging?') + withCredentials([ + kubeconfigFile( + credentialsId: env.TEST_KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } stage('deploy to production') { steps { - input(id: 'deploy-to-production', message: 'deploy to production?') - kubernetesDeploy(configs: 'deploy/prod-ol/**', enableConfigSubstitution: true, kubeconfigId: "$PROD_KUBECONFIG_CREDENTIAL_ID") + container('maven') { + input(id: 'deploy-to-production', message: 'deploy to production?') + withCredentials([ + kubeconfigFile( + credentialsId: env.PROD_KUBECONFIG_CREDENTIAL_ID, + variable: 
'KUBECONFIG') + ]) { + sh 'envsubst < deploy/prod-all-in-one/devops-sample.yaml | kubectl apply -f -' + } + } } } } } - ``` {{< notice note >}} @@ -231,35 +232,15 @@ weight: 11440 5. 流水线创建之后,可以在图形编辑面板上查看流水线的阶段和步骤。 - ![pipeline-panel](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-panel.png) - ### 步骤7:运行流水线并查看结果 1. 点击**运行**按钮运行流水线。当流水线运行达到**部署到暂存**的阶段,将会暂停,因为资源已经被部署到集群进行开发。您需要手动点击**继续**两次,以将资源部署到测试集群 `host` 和生产集群 `shire`。 - ![deploy-to-staging](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/deploy-to-staging.png) - 2. 一段时间过后,您可以看见流水线的状态展示为**成功**。 - ![pipeline-success](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-success.png) - 3. 在右上角点击**查看日志**,查看流水线运行日志。对于每个阶段,您可以点击显示日志以检查日志,同时日志可以被下载到本地进行进一步的分析。 - ![pipeline-logs](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-logs.png) - -4. 当流水线运行成功时,点击**代码质量**,通过 SonarQube 检查结果。 - - ![sonarqube-result](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/sonarqube-result.png) +4. 当流水线运行成功时,点击**代码检查**,通过 SonarQube 检查结果。 5. 
转到**项目**页面,您可以通过从下拉列表中选择特定集群,来查看部署在各集群不同项目中的资源。 - ![host-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/host-pods.png) - - ![shire-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/shire-pods.png) - - ![rohan-pods](/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/rohan-pods.png) - - - - - diff --git a/content/zh/docs/devops-user-guide/examples/customize-jenkins-agent.md b/content/zh/docs/devops-user-guide/examples/customize-jenkins-agent.md new file mode 100644 index 000000000..768124056 --- /dev/null +++ b/content/zh/docs/devops-user-guide/examples/customize-jenkins-agent.md @@ -0,0 +1,70 @@ +--- +title: "自定义 Jenkins Agent" +keywords: "KubeSphere, Kubernetes, DevOps, Jenkins, Agent" +description: "了解如何在 KubeSphere 上自定义 Jenkins Agent。" +linkTitle: "自定义 Jenkins Agent" +Weight: 11460 +--- + +如果您需要使用运行特定环境(例如 JDK 11)的 Jenkins Agent,您可以在 KubeSphere 上自定义 Jenkins Agent。 + +本文档描述如何在 KubeSphere 上自定义 Jenkins Agent。 + +## 准备工作 + +- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 + +## 自定义 Jenkins Agent + +1. 以 `admin` 用户登录 KubeSphere Web 控制台。 + +2. 点击左上角的**平台管理**,选择**集群管理**,然后在左侧导航栏点击**配置**下的**配置字典**。 + +3. 在**配置字典**页面的搜索框中输入 `jenkins-casc-config` 并按**回车键**。 + +4. 点击 `jenkins-casc-config` 进入其详情页面,点击**更多操作**,选择**编辑 YAML**。 + +5. 在弹出的对话框中,搜寻至 `data.jenkins_user.yaml:jenkins.clouds.kubernetes.templates` 下方并输入以下代码,点击**确定**。 + + ```yaml + - name: "maven-jdk11" # 自定义 Jenkins Agent 的名称。 + label: "maven jdk11" # 自定义 Jenkins Agent 的标签。若要指定多个标签,请用空格来分隔标签。 + inheritFrom: "maven" # 该自定义 Jenkins Agent 所继承的现有容器组模板的名称。 + containers: + - name: "maven" # 该自定义 Jenkins Agent 所继承的现有容器组模板中指定的容器名称。 + image: "kubespheredev/builder-maven:v3.2.0jdk11" # 此镜像只用于测试。您可以使用自己的镜像。 + ``` + + {{< notice note >}} + + 请确保遵守 YAML 文件中的缩进。 + + {{}} + +6. 请至少等待 70 秒,您的改动会自动重新加载。 + +7. 
要使用自定义 Jenkins Agent,请参考下方的示例 Jenkinsfile,在创建流水线时指定自定义 Jenkins Agent 对应的标签和容器名。 + + ```groovy + pipeline { + agent { + node { + label 'maven && jdk11' + } + } + stages { + stage('Print Maven and JDK version') { + steps { + container('maven') { + sh ''' + mvn -v + java -version + ''' + } + } + } + } + } + ``` + + diff --git a/content/zh/docs/devops-user-guide/examples/go-project-pipeline.md b/content/zh/docs/devops-user-guide/examples/go-project-pipeline.md index d84258ce8..40e6ebba1 100644 --- a/content/zh/docs/devops-user-guide/examples/go-project-pipeline.md +++ b/content/zh/docs/devops-user-guide/examples/go-project-pipeline.md @@ -1,8 +1,8 @@ --- -title: "构建和部署 Go 工程" +title: "构建和部署 Go 项目" keywords: 'Kubernetes, docker, devops, jenkins, go, KubeSphere' -description: '学习如何使用 KubeSphere 流水线构建并部署 Go 工程。' -linkTitle: "构建和部署 Go 工程" +description: '学习如何使用 KubeSphere 流水线构建并部署 Go 项目。' +linkTitle: "构建和部署 Go 项目" weight: 11410 --- @@ -10,37 +10,25 @@ weight: 11410 - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 - 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户。 -- 您需要创建一个企业空间、一个 DevOps 工程、一个项目和一个帐户 (`project-regular`),需要邀请该帐户至 DevOps 工程和项目中并赋予 `operator` 角色,以部署工作负载。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目、一个项目和一个用户 (`project-regular`),需要邀请该用户至 DevOps 项目和项目中并赋予 `operator` 角色,以部署工作负载。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建 Docker Hub 访问令牌 (Token) -1. 登录 [Docker Hub](https://hub.docker.com/) 并在右上角的菜单中选择 **Account Settings**。 +1. 登录 [Docker Hub](https://hub.docker.com/),点击右上角的帐户,并从菜单中选择 **Account Settings**。 - ![DockerHub 设置](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-settings.PNG) +2. 在左侧导航栏点击 **Security**,然后点击 **New Access Token**。 -2. 
在左侧点击 **Security**,然后点击 **New Access Token**。 - - ![DockerHub 创建令牌](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-create-token.PNG) - -3. 输入令牌名称,点击 **Create**。 - - ![DockerHub 令牌创建完成](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-ok.PNG) +3. 在弹出的对话框中,输入令牌名称(`go-project-token`),点击 **Create**。 4. 点击 **Copy and Close** 并务必保存该访问令牌。 - ![复制 DockerHub 令牌](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-copy.PNG) - ## 创建凭证 您需要在 KubeSphere 中为已创建的访问令牌创建凭证,以便流水线能够向 Docker Hub 推送镜像。此外,您还需要创建 kubeconfig 凭证,用于访问 Kubernetes 集群。 -1. 以 `project-regular` 身份登录 KubeSphere Web 控制台,转到您的 DevOps 工程,在**凭证**页面点击**创建**。 +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台。在您的 DevOps 项目中点击 **DevOps 项目设置**下的**凭证**,然后在**凭证**页面点击**创建**。 - ![创建 DockerHub ID](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-dockerhub_id.png) - -2. 在弹出对话框中,设置**凭证 ID**,稍后会用于 Jenkinsfile 中,**类型**选择**帐户凭证**。**用户名**输入您的 Docker Hub 帐户名称,**token / 密码**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 - - ![创建 Docker 凭证](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/credential-docker_create.png) +2. 在弹出的对话框中,设置**名称**,稍后会用于 Jenkinsfile 中,**类型**选择**用户名和密码**。**用户名**输入您的 Docker Hub 帐户名称,**密码/令牌**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 {{< notice tip >}} @@ -48,9 +36,7 @@ weight: 11410 {{}} -3. 再次点击**创建**,**类型**选择 **kubeconfig**。KubeSphere 会自动填充 **Content** 字段,即当前用户帐户的 kubeconfig。设置**凭证 ID**,然后点击**确定**。 - - ![创建 kubeconfig](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-kubeconfig.PNG) +3. 再次点击**创建**,**类型**选择 **kubeconfig**。KubeSphere 会自动填充**内容**字段,即当前用户帐户的 kubeconfig。设置**名称**,然后点击**确定**。 ## 创建流水线 @@ -58,30 +44,20 @@ weight: 11410 1. 要创建流水线,请在**流水线**页面点击**创建**。 - ![创建流水线](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline.PNG) - 2. 
在弹出窗口中设置名称,然后点击**下一步**。 - ![设置流水线名称](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/set-pipeline-name.PNG) - -3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,直接点击**创建**。 - - ![创建流水线-2](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline-2.PNG) +3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,点击**创建**。 ## 编辑 Jenkinsfile -1. 在流水线列表中,点击该流水线进入其详情页面。点击**编辑 Jenkinsfile** 定义一个 Jenkinsfile,流水线会基于它来运行。 +1. 在流水线列表中,点击该流水线名称进入其详情页面。点击**编辑 Jenkinsfile** 定义一个 Jenkinsfile,流水线会基于它来运行。 - ![编辑 jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/edit_jenkinsfile.png) - -2. 将以下所有内容复制并粘贴到弹出窗口中,用作流水线的示例 Jenkinsfile。您必须将 `DOCKERHUB_USERNAME`、`DOCKERHUB_CREDENTIAL`、`KUBECONFIG_CREDENTIAL_ID` 和 `PROJECT_NAME` 的值替换成您自己的值。操作完成后,点击**确定**。 +2. 将以下所有内容复制并粘贴到弹出的对话框中,用作流水线的示例 Jenkinsfile。您必须将 `DOCKERHUB_USERNAME`、`DOCKERHUB_CREDENTIAL`、`KUBECONFIG_CREDENTIAL_ID` 和 `PROJECT_NAME` 的值替换成您自己的值。操作完成后,点击**确定**。 ```groovy pipeline { agent { - node { - label 'maven' - } + label 'go' } environment { @@ -91,41 +67,48 @@ weight: 11410 DOCKERHUB_USERNAME = 'Docker Hub Username' // Docker 镜像名称 APP_NAME = 'devops-go-sample' - // ‘dockerhubid’ 是您在 KubeSphere 用 Docker Hub 访问令牌创建的凭证 ID + // 'dockerhubid' 是您在 KubeSphere 用 Docker Hub 访问令牌创建的凭证 ID DOCKERHUB_CREDENTIAL = credentials('dockerhubid') // 您在 KubeSphere 创建的 kubeconfig 凭证 ID KUBECONFIG_CREDENTIAL_ID = 'go' - // 您在 KubeSphere 创建的项目名称,不是 DevOps 工程名称 + // 您在 KubeSphere 创建的项目名称,不是 DevOps 项目名称 PROJECT_NAME = 'devops-go' } stages { stage('docker login') { steps{ - container ('maven') { - sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' - } - } - } - - stage('build & push') { - steps { - container ('maven') { - sh 'git clone https://github.com/yuswift/devops-go-sample.git' - sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' 
- sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' - } - } - } - stage ('deploy app') { - steps { - container('maven') { - kubernetesDeploy(configs: 'devops-go-sample/manifest/deploy.yaml', kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID") - } + container ('go') { + sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' } } } + + stage('build & push') { + steps { + container ('go') { + sh 'git clone https://github.com/yuswift/devops-go-sample.git' + sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' + sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' + } + } + } + + stage ('deploy app') { + steps { + container ('go') { + withCredentials([ + kubeconfigFile( + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, + variable: 'KUBECONFIG') + ]) { + sh 'envsubst < devops-go-sample/manifest/deploy.yaml | kubectl apply -f -' + } + } + } + } } + } ``` {{< notice note >}} @@ -138,23 +121,14 @@ weight: 11410 1. Jenkinsfile 设置完成后,您可以在仪表板上查看图形面板。点击**运行**来运行流水线。 - ![运行流水线](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/run_pipeline.png) - -2. 在**活动**选项卡中,您可以查看流水线的状态。稍等片刻,流水线便会成功运行。 - - ![流水线成功运行](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/pipeline_running.png) +2. 在**运行记录**选项卡中,您可以查看流水线的状态。稍等片刻,流水线便会成功运行。 ## 验证结果 1. 如果流水线成功运行,则会在 Jenkinsfile 中指定的项目中创建一个**部署 (Deployment)**。 - ![查看部署](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/view_deployment.png) +2. 查看已推送至 Docker Hub 的镜像。 -2. 
查看镜像是否已推送至 Docker Hub,如下所示: - - ![Docker 镜像-1](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-1.PNG) - - ![Docker 镜像-2](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-2.PNG) \ No newline at end of file diff --git a/content/zh/docs/devops-user-guide/examples/multi-cluster-project-example.md b/content/zh/docs/devops-user-guide/examples/multi-cluster-project-example.md index 3de89b7a7..79e0e7ba9 100644 --- a/content/zh/docs/devops-user-guide/examples/multi-cluster-project-example.md +++ b/content/zh/docs/devops-user-guide/examples/multi-cluster-project-example.md @@ -10,39 +10,27 @@ weight: 11420 - 您需要[启用多集群功能](../../../../docs/multicluster-management/)并创建一个多集群企业空间。 - 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户。 -- 您需要在 Host 集群上[启用 KubeSphere DevOps 系统](../../../../docs/pluggable-components/devops/)。 -- 您需要使用具有 `workspace-self-provisioner` 角色的帐户(例如 `project-admin`)创建一个多集群项目,并在 Host 集群上创建一个 DevOps 工程。本教程中的多集群项目创建于 Host 集群和一个 Member 集群上。 -- 您需要邀请一个帐户(例如 `project-regular`)至 DevOps 工程中,赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)、[多集群管理](../../../multicluster-management/)和[多集群项目](../../../project-administration/project-and-multicluster-project/#多集群项目)。 +- 您需要在主集群上[启用 KubeSphere DevOps 系统](../../../../docs/pluggable-components/devops/)。 +- 您需要使用具有 `workspace-self-provisioner` 角色的用户(例如 `project-admin`)创建一个多集群项目,并在主集群上创建一个 DevOps 项目。本教程中的多集群项目创建于主集群和一个成员集群上。 +- 您需要邀请一个用户(例如 `project-regular`)至 DevOps 项目中,赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)、[多集群管理](../../../multicluster-management/)和[多集群项目](../../../project-administration/project-and-multicluster-project/#多集群项目)。 ## 创建 Docker Hub 访问令牌 (Token) -1. 登录 [Docker Hub](https://hub.docker.com/) 并在右上角的菜单中选择 **Account Settings**。 +1. 
登录 [Docker Hub](https://hub.docker.com/),点击右上角的帐户,并从菜单中选择 **Account Settings**。 - ![dockerhub 设置](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-settings.PNG) +2. 在左侧导航栏点击 **Security**,然后点击 **New Access Token**。 -2. 在左侧点击 **Security**,然后点击 **New Access Token**。 - - ![dockerhub 创建令牌](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-create-token.PNG) - -3. 输入令牌名称,点击 **Create**。 - - ![dockerhub 令牌创建完成](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-ok.PNG) +3. 在弹出的对话框中,输入令牌名称(`go-project-token`),点击 **Create**。 4. 点击 **Copy and Close** 并务必保存该访问令牌。 - ![dockerhub 复制令牌](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-copy.PNG) - ## 创建凭证 您需要在 KubeSphere 中为已创建的访问令牌创建凭证,以便流水线能够向 Docker Hub 推送镜像。此外,您还需要创建 kubeconfig 凭证,用于访问 Kubernetes 集群。 -1. 以 `project-regular` 身份登录 KubeSphere Web 控制台,前往您的 DevOps 工程,在**凭证**页面点击**创建**。 +1. 以 `project-regular` 身份登录 KubeSphere Web 控制台。在您的 DevOps 项目中点击 **DevOps 项目设置**下的**凭证**,然后在**凭证**页面点击**创建**。 - ![创建 dockerhub ID](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-dockerhub-id-1.png) - -2. 在弹出对话框中,设置**凭证 ID**,稍后会用于 Jenkinsfile 中,**类型**选择**帐户凭证**。**用户名**输入您的 Docker Hub 帐户名称,**token / 密码**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 - - ![创建 Docker 凭证](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/credential-docker.png) +2. 在弹出的对话框中,设置**名称**,稍后会用于 Jenkinsfile 中,**类型**选择**用户名和密码**。**用户名**输入您的 Docker Hub 帐户名称,**密码/令牌**中输入刚刚创建的访问令牌。操作完成后,点击**确定**。 {{< notice tip >}} @@ -50,9 +38,7 @@ weight: 11420 {{}} -3. 
登出 KubeSphere Web 控制台,再以 `project-admin` 身份登录。前往您的 DevOps 工程,在**凭证**页面点击**创建**。**类型**选择 **kubeconfig**,KubeSphere 会自动填充 **Content** 字段,即当前帐户的 kubeconfig。设置**凭证 ID**,然后点击**确定**。 - - ![创建 kubeconfig](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-kubeconfig.PNG) +3. 登出 KubeSphere Web 控制台,再以 `project-admin` 身份登录。前往您的 DevOps 项目,在**凭证**页面点击**创建**。**类型**选择 **kubeconfig**,KubeSphere 会自动填充**内容**字段,即当前帐户的 kubeconfig。设置**名称**,然后点击**确定**。 {{< notice note >}} @@ -66,31 +52,20 @@ weight: 11420 1. 要创建流水线,请在**流水线**页面点击**创建**。 - ![创建流水线](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create_pipeline.png) - 2. 在弹出窗口中设置名称,然后点击**下一步**。 - ![设置流水线名称](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/set-pipeline_name.png) - -3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,直接点击**创建**。 - - ![创建流水线-2](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-pipeline_2.png) +3. 在本教程中,您可以为所有字段使用默认值。在**高级设置**页面,点击**创建**。 ## 编辑 Jenkinsfile 1. 在流水线列表中,点击该流水线进入其详情页面。点击**编辑 Jenkinsfile** 定义一个 Jenkinsfile,流水线会基于它来运行。 - ![编辑 jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/edit_jenkinsfile.png) - 2. 
将以下所有内容复制并粘贴到弹出窗口中,用作流水线的示例 Jenkinsfile。您必须将 `DOCKERHUB_USERNAME`、`DOCKERHUB_CREDENTIAL`、`KUBECONFIG_CREDENTIAL_ID`、`MULTI_CLUSTER_PROJECT_NAME` 和 `MEMBER_CLUSTER_NAME` 的值替换成您自己的值。操作完成后,点击**确定**。 ```groovy pipeline { agent { - node { - label 'maven' - } - + label 'go' } environment { @@ -98,31 +73,30 @@ weight: 11420 // Docker Hub 用户名 DOCKERHUB_USERNAME = 'Your Docker Hub username' APP_NAME = 'devops-go-sample' - // ‘dockerhub-go’ 即您在 KubeSphere 控制台上创建的 Docker Hub 凭证 ID - DOCKERHUB_CREDENTIAL = credentials('dockerhub-go') + // ‘dockerhub’ 即您在 KubeSphere 控制台上创建的 Docker Hub 凭证 ID + DOCKERHUB_CREDENTIAL = credentials('dockerhub') // 您在 KubeSphere 控制台上创建的 kubeconfig 凭证 ID - KUBECONFIG_CREDENTIAL_ID = dockerhub-go-kubeconfig + KUBECONFIG_CREDENTIAL_ID = 'kubeconfig' // 您企业空间中的多集群项目名称 MULTI_CLUSTER_PROJECT_NAME = 'demo-multi-cluster' - // 您用来部署应用的 Member 集群名称 - // 本教程中,应用部署在 Host 集群和一个 Member 集群上 - // 若需要部署在多个 Member 集群上, 请编辑 manifest/multi-cluster-deploy.yaml + // 您用来部署应用的成员集群名称 + // 本教程中,应用部署在主集群和一个成员集群上 + // 若需要部署在多个成员集群上, 请编辑 manifest/multi-cluster-deploy.yaml MEMBER_CLUSTER_NAME = 'Your Member Cluster name' } stages { stage('docker login') { steps { - container('maven') { + container('go') { sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin' } - } } stage('build & push') { steps { - container('maven') { + container('go') { sh 'git clone https://github.com/yuswift/devops-go-sample.git' sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .' 
sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME' @@ -132,17 +106,15 @@ weight: 11420 stage('deploy app to multi cluster') { steps { - container('maven') { - script { + container('go') { withCredentials([ kubeconfigFile( - credentialsId: 'dockerhub-go-kubeconfig', + credentialsId: env.KUBECONFIG_CREDENTIAL_ID, variable: 'KUBECONFIG') ]) { sh 'envsubst < devops-go-sample/manifest/multi-cluster-deploy.yaml | kubectl apply -f -' } - } - } + } } } } @@ -158,5 +130,3 @@ weight: 11420 ## 运行流水线 保存 Jenkinsfile 后,点击**运行**。如果一切顺利,您会在您的多集群项目中看到部署 (Deployment) 工作负载。 - -![Deployment](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/multi-cluster_ok.png) \ No newline at end of file diff --git a/content/zh/docs/devops-user-guide/examples/use-nexus-in-pipelines.md b/content/zh/docs/devops-user-guide/examples/use-nexus-in-pipelines.md index 83bbcd580..ea3939084 100644 --- a/content/zh/docs/devops-user-guide/examples/use-nexus-in-pipelines.md +++ b/content/zh/docs/devops-user-guide/examples/use-nexus-in-pipelines.md @@ -20,7 +20,7 @@ weight: 11450 - 准备一个[GitHub](https://github.com/) 帐户。 -- 创建一个企业空间、一个 DevOps 工程(例如,`demo-devops`)和一个帐户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 创建一个企业空间、一个 DevOps 项目(例如,`demo-devops`)和一个用户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -36,20 +36,14 @@ weight: 11450 - `group`:一组已配置好的 Nexus 仓库。 - ![repo-type](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png) -3. 点击仓库查看它的详细信息。例如:点击 **maven-public** 进去详情页面,并且查看它的 URL。 - - ![maven-public-url](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png) +3. 点击仓库查看它的详细信息。例如:点击 **maven-public** 进入详情页面,并且查看它的 **URL**。 ### 步骤 2:在 GitHub 仓库修改 `pom.xml` -1. 
登录 GitHub,fork [示例仓库](https://github.com/devops-ws/learn-pipeline-java)到您的 GitHub 帐户。 +1. 登录 GitHub,Fork [示例仓库](https://github.com/devops-ws/learn-pipeline-java)到您的 GitHub 帐户。 2. 在您的 **learn-pipline-java** GitHub 仓库中,点击根目录下的文件 `pom.xml`。 - ![click-pom](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png) - 3. 在文件中点击 以修改 `` 代码片段。设置 `` 并使用您的 Nexus 仓库的 URL。 ![modify-pom](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/modify-pom.png) @@ -60,15 +54,11 @@ weight: 11450 1. 使用 `admin` 帐户登录 KubeSphere Web 控制台,点击左上角的**平台管理**,选择**集群管理**。 -2. 在**配置中心**下面选择 **配置**。在 **配置** 页面上的下拉列表中选择 `kubesphere-devops-system` ,然后点击 `ks-devops-agent`。 +2. 在**配置**下面选择 **配置**。在 **配置** 页面上的下拉列表中选择 `kubesphere-devops-worker` ,然后点击 `ks-devops-agent`。 - ![ks-devops-agent](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png) +3. 在详情页面,点击下拉菜单**更多操作**中的**编辑 YAML**。 -3. 在详情页面,点击下拉菜单**更多操作**中的**编辑配置文件**。 - - ![click-edit-yaml](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png) - -4. 在显示的会话窗中,向下滚动,找到 `` 代码片段,输入下列代码: +4. 在弹出的对话框中,向下滚动,找到 `` 代码片段,输入下列代码: ```yaml @@ -105,34 +95,28 @@ weight: 11450 {{< notice note >}} - `` 是您在步骤 2 设置给 Nexus 唯一标识符。 `` 是 Nexus 仓库的名字。 `` 是您 Nexus 仓库的 URL。 `` 是要镜像的 Maven 仓库。在本教程,输入 `*` 镜像所有 Maven 仓库。有关更多信息请参考[为仓库使用镜像](http://maven.apache.org/guides/mini/guide-mirror-settings.html)。 + `` 是您在步骤 2 设置给 Nexus 唯一标识符。 `` 是 Nexus 仓库的名称。 `` 是您 Nexus 仓库的 URL。 `` 是要镜像的 Maven 仓库。在本教程,输入 `*` 镜像所有 Maven 仓库。有关更多信息请参考[为仓库使用镜像](http://maven.apache.org/guides/mini/guide-mirror-settings.html)。 {{}} -6. 当您完成,点击**更新**。 +6. 当您完成,点击**确定**。 ### 步骤 4:创建流水线 -1. 登出 KubeSphere Web 控制台,使用帐户 `project-regular` 登录。转到 DevOps 工程,然后在**流水线**页面点击**创建**。 +1. 登出 KubeSphere Web 控制台,使用帐户 `project-regular` 登录。转到 DevOps 项目,然后在**流水线**页面点击**创建**。 -2. 
在**基础信息**选项卡中,为流水线设置名字(例如,`nexus-pipeline`),然后点击**下一步**。 - - ![set-pipeline-name](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png) +2. 在**基础信息**选项卡中,为流水线设置名称(例如,`nexus-pipeline`),然后点击**下一步**。 3. 在**高级设置**选项卡中,点击**创建**以使用默认配置。 -4. 点击流水线进入它的详情页面,然后点击**编辑 Jenkinsfile**。 +4. 点击流水线名称进入它的详情页面,然后点击**编辑 Jenkinsfile**。 - ![click-edit-jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png) - -5. 在出现的会话窗口中,在 Jenkinsfile 中输入如下内容。当您完成,点击**确定**。 +5. 在弹出的对话框中,输入以下 Jenkinsfile。完成后,点击**确定**。 ```groovy pipeline { agent { - node { label 'maven' - } } stages { stage ('clone') { @@ -164,12 +148,9 @@ weight: 11450 } } ``` - - ![enter-jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png) - {{< notice note >}} - 您需要用您自己的 GitHub 仓库地址替换原有的仓库地址。在 `deploy to Nexus` 阶段的步骤中的命令中,`nexus` 是您在 ConfigMap 上设置在 `` 上的名字,同时 `http://135.68.37.85:8081/repository/maven-snapshots/` 是您 Nexus 仓库的 URL。 + 您需要用您自己的 GitHub 仓库地址替换原有的仓库地址。在 `deploy to Nexus` 阶段的步骤中的命令中,`nexus` 是您在 ConfigMap 上设置在 `` 上的名称,同时 `http://135.68.37.85:8081/repository/maven-snapshots/` 是您 Nexus 仓库的 URL。 {{}} @@ -177,21 +158,15 @@ weight: 11450 1. 您可以在图形编辑面板中看到所有的阶段和步骤,点击**运行**去运行流水线。 - ![click-run](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png) - 2. 一段时间过后,你可以看到流水线的状态显示**成功**。点击**成功**的记录查看细节。 - ![pipeline-success](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png) - 3. 您可以点击**查看日志**查看更详细的日志。 - ![pipeline-logs](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png) - -4. 登录 Nexus 点击**浏览**。点击 **maven-public**,可以看到所有依赖已经被下载好。 +4. 登录 Nexus 点击**浏览**。点击 **maven-public**,可以看到已经下载所有依赖。 ![maven-public](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public.png) -5. 回到 **Browse** 页面,点击 **maven-sanpshots**。可以看到所有 JAR 包已经被上传到仓库。 +5. 
回到 **Browse** 页面,点击 **maven-snapshots**。可以看到所有 JAR 包已经上传至仓库。 ![maven-snapshots](/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-snapshots.png) diff --git a/content/zh/docs/devops-user-guide/how-to-integrate/harbor.md b/content/zh/docs/devops-user-guide/how-to-integrate/harbor.md index f72d1b1a4..6e9052593 100644 --- a/content/zh/docs/devops-user-guide/how-to-integrate/harbor.md +++ b/content/zh/docs/devops-user-guide/how-to-integrate/harbor.md @@ -11,7 +11,7 @@ weight: 11320 ## 准备工作 - 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`)。需要邀请该帐户至 DevOps 工程并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`)。需要邀请该用户至 DevOps 项目并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 安装 Harbor @@ -26,25 +26,15 @@ helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL ## 获取 Harbor 凭证 -1. 安装 Harbor 后,请访问 `NodeIP:30002` 并使用默认帐户和密码 (`admin/Harbor12345`) 登录控制台。转到**项目**并点击**新建项目**。 +1. 安装 Harbor 后,请访问 `:30002` 并使用默认帐户和密码 (`admin/Harbor12345`) 登录控制台。在左侧导航栏中点击**项目**并在**项目**页面点击**新建项目**。 - ![harbor-projects1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects1.png) +2. 在弹出的对话框中,设置项目名称 (`ks-devops-harbor`) 并点击**确定**。 -2. 设置项目名称 (`ks-devops-harbor`) 并点击**确定**。 +3. 点击刚刚创建的项目,在**机器人帐户**选项卡下点击**添加机器人帐户**。 - ![set-name1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name1.png) +4. 在弹出的对话框中,为机器人帐户设置名称 (`robot-test`) 并点击**添加**。请确保在**权限**中勾选推送制品的权限选框。 -3. 点击刚刚创建的项目,在**机器人帐户**选项卡下选择**添加机器人帐户**。 - - ![robot-account1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account1.png) -4. 
为机器人帐户设置名称 (`robot-test`) 并保存。 - - ![robot-account-name1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name1.png) - -5. 点击**导出到文件中**,保存该令牌。 - - ![export-to-file1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file1.png) +5. 在弹出的对话框中,点击**导出到文件中**,保存该令牌。 ## 启用 Insecure Registry @@ -79,13 +69,9 @@ helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL ## 创建凭证 -1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 工程,在**工程管理**下的**凭证**页面为 Harbor 创建凭证。 +1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面为 Harbor 创建凭证。 - ![创建凭证](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/create-credentials.PNG) - -2. 在**创建凭证**页面,设置凭证 ID (`robot-test`),**类型**选择**帐户凭证**。**用户名**字段必须和您刚刚下载的 JSON 文件中 `name` 的值相同,并在 **token / 密码**中输入该文件中 `token` 的值。 - - ![credentials-page2](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page2.png) +2. 在**创建凭证**页面,设置凭证 ID (`robot-test`),**类型**选择**用户名和密码**。**用户名**字段必须和您刚刚下载的 JSON 文件中 `name` 的值相同,并在**密码/令牌**中输入该文件中 `token` 的值。 3. 点击**确定**以保存。 @@ -93,18 +79,12 @@ helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL 1. 转到**流水线**页面,点击**创建**。在**基本信息**选项卡,输入名称 (`demo-pipeline`),然后点击**下一步**。 - ![basic-info1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info1.png) - 2. **高级设置**中使用默认值,点击**创建**。 - ![advanced-settings1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings1.png) - ## 编辑 Jenkinsfile 1. 点击该流水线进入其详情页面,然后点击**编辑 Jenkinsfile**。 - ![edit-jenkinsfile1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile1.png) - 2. 
将以下内容复制粘贴至 Jenkinsfile。请注意,您必须将 `REGISTRY`、`HARBOR_NAMESPACE`、`APP_NAME` 和 `HARBOR_CREDENTIAL` 替换为您自己的值。 ```groovy diff --git a/content/zh/docs/devops-user-guide/how-to-integrate/sonarqube.md b/content/zh/docs/devops-user-guide/how-to-integrate/sonarqube.md index 3184e67b2..2eb8f60a9 100644 --- a/content/zh/docs/devops-user-guide/how-to-integrate/sonarqube.md +++ b/content/zh/docs/devops-user-guide/how-to-integrate/sonarqube.md @@ -79,20 +79,16 @@ weight: 11310 ```bash $ kubectl get pod -n kubesphere-devops-system NAME READY STATUS RESTARTS AGE - ks-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m + devops-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m s2ioperator-0 1/1 Running 1 84m sonarqube-postgresql-0 1/1 Running 0 5m31s sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s ``` -2. 在浏览器中访问 SonarQube 控制台 `http://{$Node IP}:{$NodePort}`,您可以看到其主页,如下所示: - - ![访问 SonarQube 控制台](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/access-sonarqube-console.png) +2. 在浏览器中访问 SonarQube 控制台 `http://:`。 3. 点击右上角的 **Log in**,然后使用默认帐户 `admin/admin` 登录。 - ![登录页面](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/log-in-page.png) - {{< notice note >}} 取决于您的实例的部署位置,您可能需要设置必要的端口转发规则,并在您的安全组中放行该端口,以便访问 SonarQube。 @@ -124,7 +120,7 @@ weight: 11310 1. 执行以下命令获取 SonarQube Webhook 的地址。 ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/ ``` @@ -143,7 +139,7 @@ weight: 11310 ![SonarQube Webhook-2](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-webhook-2.png) -5. 
在弹出对话框中输入 **Name** 和 **Jenkins Console URL**(即 SonarQube Webhook 地址)。点击 **Create** 完成操作。 +5. 在弹出的对话框中输入 **Name** 和 **Jenkins Console URL**(即 SonarQube Webhook 地址)。点击 **Create** 完成操作。 ![Webhook 页面信息](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/webhook-page-info.png) @@ -178,7 +174,7 @@ weight: 11310 1. 执行以下命令获取 Jenkins 的地址。 ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT ``` @@ -189,9 +185,7 @@ weight: 11310 http://10.77.1.201:30180 ``` -3. 请使用地址 `http://{$Public IP}:30180` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。此外,Jenkins 还配置有 KubeSphere LDAP,这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。有关配置 Jenkins 的更多信息,请参见 [Jenkins 系统设置](../../../devops-user-guide/how-to-use/jenkins-setting/)。 - - ![Jenkins 登录页面](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-login-page.png) +3. 请使用地址 `http://:30180` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。此外,Jenkins 还配置有 KubeSphere LDAP,这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。有关配置 Jenkins 的更多信息,请参见 [Jenkins 系统设置](../../../devops-user-guide/how-to-use/jenkins-setting/)。 {{< notice note >}} @@ -199,19 +193,13 @@ weight: 11310 {{}} -4. 点击左侧的**系统管理**。 - - ![管理 Jenkins](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/manage-jenkins.png) +4. 点击左侧导航栏中的**系统管理**。 5. 向下翻页找到并点击**系统配置**。 - ![configure-system](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/configure-system.png) - 6. 
搜寻到 **SonarQube servers**,然后点击 **Add SonarQube**。 - ![添加 SonarQube](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-sonarqube.png) - -7. 输入 **Name** 和 **Server URL** (`http://{$Node IP}:{$NodePort}`)。点击**添加**,选择 **Jenkins**,然后在弹出对话框中用 SonarQube 管理员令牌创建凭证(如下方第二张截图所示)。创建凭证后,从 **Server authentication token** 旁边的下拉列表中选择该凭证。点击**应用**完成操作。 +7. 输入 **Name** 和 **Server URL** (`http://:`)。点击**添加**,选择 **Jenkins**,然后在弹出的对话框中用 SonarQube 管理员令牌创建凭证(如下方第二张截图所示)。创建凭证后,从 **Server authentication token** 旁边的下拉列表中选择该凭证。点击**应用**完成操作。 ![sonarqube-jenkins-settings](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-jenkins-settings.png) @@ -233,17 +221,13 @@ weight: 11310 kubectl edit cm -n kubesphere-system ks-console-config ``` -2. 搜寻到 `client`,添加 `devops` 字段并指定 `sonarqubeURL`。 +2. 搜寻到 `data.client.enableKubeConfig`,在下方添加 `devops` 字段并指定 `sonarqubeURL`。 ```bash client: - version: - kubesphere: v3.0.0 - kubernetes: v1.17.9 - openpitrix: v0.3.5 enableKubeConfig: true - devops: # Add this field manually. - sonarqubeURL: http://10.77.1.201:31377 # The SonarQube IP address. + devops: # 手动添加该字段。 + sonarqubeURL: http://10.77.1.201:31377 # SonarQube IP 地址。 ``` 3. 保存该文件。 @@ -253,14 +237,14 @@ weight: 11310 执行以下命令。 ```bash -kubectl -n kubesphere-system rollout restart deploy ks-apiserver +kubectl -n kubesphere-devops-system rollout restart deploy devops-apiserver ``` ```bash kubectl -n kubesphere-system rollout restart deploy ks-console ``` -## 为新工程创建 SonarQube Token +## 为新项目创建 SonarQube Token 您需要一个 SonarQube 令牌,以便您的流水线可以在运行时与 SonarQube 通信。 @@ -268,11 +252,11 @@ kubectl -n kubesphere-system rollout restart deploy ks-console ![SonarQube 创建项目](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-create-project.png) -2. 输入工程密钥,例如 `java-demo`,然后点击 **Set Up**。 +2. 
输入项目密钥,例如 `java-demo`,然后点击 **Set Up**。 ![Jenkins 项目密钥](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-projet-key.png) -3. 输入工程名称,例如 `java-sample`,然后点击 **Generate**。 +3. 输入项目名称,例如 `java-sample`,然后点击 **Generate**。 ![创建令牌](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/generate-a-token.png) @@ -286,6 +270,4 @@ kubectl -n kubesphere-system rollout restart deploy ks-console ## 在 KubeSphere 控制台查看结果 -您[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel/)或[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile/)之后,可以查看代码质量分析的结果。如果 SonarQube 成功运行,您可能会看到下图所示结果。 - -![code-analysis](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/code-analysis.png) \ No newline at end of file +您[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel/)或[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile/)之后,可以查看代码质量分析的结果。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md b/content/zh/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md index d268afffd..0670e1962 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md +++ b/content/zh/docs/devops-user-guide/how-to-use/choose-jenkins-agent.md @@ -12,7 +12,7 @@ weight: 11250 podTemplate 是一种 Pod 模板,该 Pod 用于创建 Agent。用户可以定义在 Kubernetes 插件中使用的 podTemplate。 -当流水线运行时,每个 Jenkins Agent Pod 必须具有一个名为 `jnlp` 的容器,用于 Jenkins Master 和 Jenkins Agent 之间进行通信。另外,用户可以在 podTemplate 中添加容器以满足自己的需求。用户可以选择使用自己的 Pod YAML 来灵活地控制运行时环境 (Runtime),并且可以通过 `container` 命令来切换容器。请参见以下示例。 +当流水线运行时,每个 Jenkins Agent Pod 必须具有一个名为 `jnlp` 的容器,用于 Jenkins Controller 和 Jenkins Agent 之间进行通信。另外,用户可以在 podTemplate 中添加容器以满足自己的需求。用户可以选择使用自己的 Pod YAML 来灵活地控制运行时环境 (Runtime),并且可以通过 `container` 命令来切换容器。请参见以下示例。 ```groovy pipeline { diff --git 
a/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md b/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md index f0c14c4e0..ddba121d8 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md +++ b/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel.md @@ -14,7 +14,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 - 您需要有一个 [Docker Hub](http://www.dockerhub.com/) 帐户。 -- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`),必须邀请该帐户至 DevOps 工程中并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),必须邀请该用户至 DevOps 项目中并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 设置 CI 专用节点来运行流水线。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/set-ci-node/)。 - 配置您的电子邮件服务器用于接收流水线通知(可选)。有关更多信息,请参见[为 KubeSphere 流水线设置电子邮件服务器](../../../devops-user-guide/how-to-use/jenkins-email/)。 - 配置 SonarQube 将代码分析纳入流水线中(可选)。有关更多信息,请参见[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。 @@ -40,7 +40,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ### 步骤 1:创建凭证 -1. 以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 工程,在**工程管理**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../credential-management/)。 +1. 
以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../credential-management/)。 {{< notice note >}} @@ -48,38 +48,28 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ {{}} - | 凭证 ID | 类型 | 用途 | - | --------------- | ---------- | ---------- | - | dockerhub-id | 帐户凭证 | Docker Hub | - | demo-kubeconfig | kubeconfig | Kubernetes | + | 凭证 ID | 类型 | 用途 | + | --------------- | ------------ | ---------- | + | dockerhub-id | 用户名和密码 | Docker Hub | + | demo-kubeconfig | kubeconfig | Kubernetes | -2. 您还需要为 SonarQube 创建一个凭证 ID (`sonar-token`),用于上述的阶段 3(代码分析)。请参考[为新工程创建 SonarQube 令牌 (Token)](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project),在下图所示的**密钥**字段中输入令牌。点击**确定**完成操作。 - - ![Sonar 令牌](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonar-token.PNG) +2. 您还需要为 SonarQube 创建一个凭证 ID (`sonar-token`),用于上述的阶段 3(代码分析)。请参阅[为新项目创建 SonarQube 令牌 (Token)](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project),在**访问令牌**类型的凭证的**令牌**字段中输入 SonarQube 令牌。点击**确定**完成操作。 3. 
您可以在列表中看到已创建的三个凭证。 - ![凭证列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/credential-list.PNG) - ### 步骤 2:创建项目 -在本教程中,示例流水线会将 [sample](https://github.com/kubesphere/devops-java-sample/tree/sonarqube) 应用部署至一个项目。因此,您必须先创建一个项目(例如 `kubesphere-sample-dev`)。待流水线成功运行,会在该项目中自动创建该应用的部署和服务。 +在本教程中,示例流水线会将 [sample](https://github.com/kubesphere/devops-maven-sample/tree/sonarqube) 应用部署至一个项目。因此,您必须先创建一个项目(例如 `kubesphere-sample-dev`)。待流水线成功运行,会在该项目中自动创建该应用的部署和服务。 -您可以使用 `project-admin` 帐户创建项目。此外,该帐户也是 CI/CD 流水线的审核员。请确保将 `project-regular` 帐户邀请至该项目并授予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您可以使用 `project-admin` 帐户创建项目。此外,该用户也是 CI/CD 流水线的审核员。请确保将 `project-regular` 帐户邀请至该项目并授予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ### 步骤 3:创建流水线 -1. 请确保以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 工程。在**流水线**页面点击**创建**。 +1. 请确保以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 项目。在**流水线**页面点击**创建**。 - ![创建流水线](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/create_pipeline.png) +2. 在弹出的对话框中,将它命名为 `graphical-pipeline`,点击**下一步**。 -2. 在弹出对话框中,将它命名为 `graphical-pipeline`,点击**下一步**。 - - ![基本信息](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/basic_info.png) - -3. 在**高级设置**页面,点击**添加参数**,添加以下三个字符串参数。这些参数将用于流水线的 Docker 命令。添加完成后,点击**创建**。 - - ![添加参数](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/add_parameter.png) +3. 在**高级设置**页面,点击**添加**,添加以下三个字符串参数。这些参数将用于流水线的 Docker 命令。添加完成后,点击**创建**。 | 参数类型 | 名称 | 值 | 描述信息 | | -------- | ------------------- | --------------- | ------------------------------------------ | @@ -95,13 +85,11 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ 4. 
创建的流水线会显示在列表中。 - ![流水线列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_list.png) - ### 步骤 4:编辑流水线 -点击流水线进入其详情页面。要使用图形编辑面板,请点击**流水线**选项卡下的**编辑流水线**。在弹出对话框中,点击**自定义流水线**。该流水线包括六个阶段,请按照以下步骤设置每个阶段。 +- 点击流水线进入其详情页面。要使用图形编辑面板,请点击**任务状态**选项卡下的**编辑流水线**。在弹出的对话框中,点击**自定义流水线**。该流水线包括六个阶段,请按照以下步骤设置每个阶段。 -![编辑流水线](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/edit_pipeline.png) +- 您也可以点击**持续集成 (CI)** 和**持续集成&交付 (CI/CD)** 来使用 KubeSphere 提供的[内置流水线模板](../use-pipeline-templates/)。 {{< notice note >}} @@ -109,14 +97,6 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ {{}} -![click-custom-pipeline](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/click-custom-pipeline.png) - -{{< notice note >}} - -您也可以点击**持续集成 (CI)** 和**持续集成&交付 (CI/CD)** 来使用 KubeSphere 提供的[内置流水线模板](../use-pipeline-templates/)。 - -{{}} - #### 阶段 1:拉取源代码 (Checkout SCM) 图形编辑面板包括两个区域:左侧的**画布**和右侧的**内容**。它会根据您对不同阶段和步骤的配置自动生成一个 Jenkinsfile,为开发者提供更加用户友好的操作体验。 @@ -141,9 +121,9 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![编辑面板](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/edit_panel.png) -3. 点击**添加步骤**。在列表中选择 **git**,以从 GitHub 拉取示例代码。在弹出对话框中,填写必需的字段。点击**确定**完成操作。 +3. 点击**添加步骤**。在列表中选择 **git**,以从 GitHub 拉取示例代码。在弹出的对话框中,填写必需的字段。点击**确定**完成操作。 - - **URL**:输入 GitHub 仓库地址 `https://github.com/kubesphere/devops-java-sample.git`。请注意,这里是示例地址,您需要使用您自己的仓库地址。 + - **URL**:输入 GitHub 仓库地址 `https://github.com/kubesphere/devops-maven-sample.git`。请注意,这里是示例地址,您需要使用您自己的仓库地址。 - **凭证 ID**:本教程中无需输入凭证 ID。 - **分支**:如果您将其留空,则默认为 master 分支。请输入 `sonarqube`,或者如果您不需要代码分析阶段,请将其留空。 @@ -166,7 +146,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ 3. 
点击**添加嵌套步骤**,在 `maven` 容器下添加一个嵌套步骤。在列表中选择 **shell** 并在命令行中输入以下命令。点击**确定**保存操作。 ```shell - mvn clean -o -gs `pwd`/configuration/settings.xml test + mvn clean -gs `pwd`/configuration/settings.xml test ``` {{< notice note >}} @@ -175,9 +155,6 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ {{}} - ![shell](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_set.png) - - ![单元测试设置完成](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/unit_test_set.png) #### 阶段 3:代码分析(可选) @@ -199,7 +176,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![嵌套步骤](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/nested_step.png) -5. 点击 **Sonarqube 配置**,在弹出对话框中保持默认名称 `sonar` 不变,点击**确定**保存操作。 +5. 点击 **Sonarqube 配置**,在弹出的对话框中保持默认名称 `sonar` 不变,点击**确定**保存操作。 ![sonar](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonar_env.png) @@ -210,7 +187,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ 7. 点击 **shell** 并在命令行中输入以下命令,用于 sonarqube 分支和认证,点击**确定**完成操作。 ```shell - mvn sonar:sonar -o -gs `pwd`/configuration/settings.xml -Dsonar.login=$SONAR_TOKEN + mvn sonar:sonar -Dsonar.login=$SONAR_TOKEN ``` ![新的 SonarQube shell](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonarqube_shell_new.png) @@ -221,7 +198,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![超时](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/timeout_set.png) -9. 点击**超时**步骤下的**添加嵌套步骤**,选择**代码质量检查 (SonarQube)**。在弹出对话框中选择**检查通过后开始后续任务**。点击**确定**保存操作。 +9. 
点击**超时**步骤下的**添加嵌套步骤**,选择**代码质量检查 (SonarQube)**。在弹出的对话框中选择**检查通过后开始后续任务**。点击**确定**保存操作。 ![waitforqualitygate](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/waitforqualitygate_set.png) @@ -240,12 +217,12 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ 3. 点击 `maven` 容器下的**添加嵌套步骤**添加一个嵌套步骤。在列表中选择 **shell** 并在弹出窗口中输入以下命令,点击**确定**完成操作。 ```shell - mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package + mvn -Dmaven.test.skip=true clean package ``` ![maven 嵌套步骤](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/nested_step_maven.png) -4. 再次点击**添加嵌套步骤**,选择 **shell**。在命令行中输入以下命令,以根据 [Dockerfile](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Dockerfile-online) 构建 Docker 镜像。点击**确定**确认操作。 +4. 再次点击**添加嵌套步骤**,选择 **shell**。在命令行中输入以下命令,以根据 [Dockerfile](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Dockerfile-online) 构建 Docker 镜像。点击**确定**确认操作。 {{< notice note >}} @@ -259,9 +236,9 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![shell 命令](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_command.png) -5. 再次点击**添加嵌套步骤**,选择**添加凭证**。在弹出对话框中填写以下字段,点击**确定**确认操作。 +5. 再次点击**添加嵌套步骤**,选择**添加凭证**。在弹出的对话框中填写以下字段,点击**确定**确认操作。 - - **凭证 ID**:选择您创建的 Docker Hub 凭证,例如 `dockerhub-id`。 + - **凭证名称**:选择您创建的 Docker Hub 凭证,例如 `dockerhub-id`。 - **密码变量**:输入 `DOCKER_PASSWORD`。 - **用户名变量**:输入 `DOCKER_USERNAME`。 @@ -295,7 +272,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![添加制品阶段](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/add_artifact_stage.png) -2. 选中 **Artifacts** 阶段,点击**任务**下的**添加步骤**,选择**保存制品**。在弹出对话框中输入 `target/*.jar`,用于设置 Jenkins 中制品的保存路径。点击**确定**完成操作。 +2. 
选中 **Artifacts** 阶段,点击**任务**下的**添加步骤**,选择**保存制品**。在弹出的对话框中输入 `target/*.jar`,用于设置 Jenkins 中制品的保存路径。点击**确定**完成操作。 ![制品信息](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/artifact_info.png) @@ -311,18 +288,27 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ {{< notice note >}} - 在 KubeSphere 3.1 中,能够运行流水线的帐户也能够继续或终止该流水线。此外,流水线创建者、拥有该工程管理员角色的帐户或者您指定的帐户也有权限继续或终止流水线。 + 在 KubeSphere 3.2.x 中,能够运行流水线的帐户也能够继续或终止该流水线。此外,流水线创建者、拥有该项目管理员角色的用户或者您指定的帐户也有权限继续或终止流水线。 {{}} -3. 再次点击 **Deploy to Dev** 阶段下的**添加步骤**。在列表中选择 **kubernetesDeploy** 并在弹出对话框中填写以下字段。点击**确定**保存操作。 +3. 再次点击 **Deploy to Dev** 阶段下的**添加步骤**。在列表中选择**指定容器**,将其命名为 `maven` 然后点击**确定**。 - - **Kubeconfig**:选择您创建的 Kubeconfig,例如 `demo-kubeconfig`。 - - **配置文件路径**:输入 `deploy/no-branch-dev/**`,即代码仓库中 Kubernetes 资源 [YAML](https://github.com/kubesphere/devops-java-sample/tree/sonarqube/deploy/no-branch-dev) 文件的相对路径。 +4. 点击 `maven` 容器步骤下的**添加嵌套步骤**。在列表中选择**添加凭证**,在弹出的对话框中填写以下字段,然后点击**确定**。 - ![kubernetesDeploy](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/kubernetesDeploy_set.png) + - 凭证名称:选择您创建的 kubeconfig 凭证,例如 `demo-kubeconfig`。 + - kubeconfig 变量:输入 `KUBECONFIG_CONTENT`。 -4. 如果您想在流水线成功运行时接收电子邮件通知,请点击**添加步骤**,选择**邮件**,以添加电子邮件信息。请注意,配置电子邮件服务器是可选操作,如果您跳过该步骤,依然可以运行流水线。 +5. 点击**添加凭证**步骤下的**添加嵌套步骤**。在列表中选择 **shell**,在弹出的对话框中输入以下命令,然后点击**确定**。 + + ```shell + mkdir ~/.kube + echo "$KUBECONFIG_CONTENT" > ~/.kube/config + envsubst < deploy/dev-ol/devops-sample-svc.yaml | kubectl apply -f - + envsubst < deploy/dev-ol/devops-sample.yaml | kubectl apply -f - + ``` + +6. 如果您想在流水线成功运行时接收电子邮件通知,请点击**添加步骤**,选择**邮件**,以添加电子邮件信息。请注意,配置电子邮件服务器是可选操作,如果您跳过该步骤,依然可以运行流水线。 {{< notice note >}} @@ -330,42 +316,36 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ {{}} -5. 待您完成上述步骤,请在右下角点击**确认**和**保存**。随后,您可以看到该流水线有完整的工作流,并且每个阶段也清晰列示。当您用图形编辑面板定义流水线时,KubeSphere 会自动创建相应的 Jenkinsfile。点击**编辑 Jenkinsfile** 查看该 Jenkinsfile。 +7. 
待您完成上述步骤,请在右下角点击**保存**。随后,您可以看到该流水线有完整的工作流,并且每个阶段也清晰列示。当您用图形编辑面板定义流水线时,KubeSphere 会自动创建相应的 Jenkinsfile。点击**编辑 Jenkinsfile** 查看该 Jenkinsfile。 - ![流水线设置完成](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_done.png) - {{< notice note >}} - 在**流水线**页面,您可以点击该流水线右侧的三个点,然后选择**复制流水线**来创建该流水线的副本。如果您需要同时运行多个不包含多分支的流水线,您可以全部选中这些流水线,然后点击**运行**来批量运行它们。 + 在**流水线**页面,您可以点击该流水线右侧的 ,然后选择**复制**来创建该流水线的副本。如果您需要同时运行多个不包含多分支的流水线,您可以全部选中这些流水线,然后点击**运行**来批量运行它们。 {{}} ### 步骤 5:运行流水线 -1. 您需要手动运行使用图形编辑面板创建的流水线。点击**运行**,您可以在弹出对话框中看到步骤 3 中已定义的三个字符串参数。点击**确定**来运行流水线。 +1. 您需要手动运行使用图形编辑面板创建的流水线。点击**运行**,您可以在弹出的对话框中看到步骤 3 中已定义的三个字符串参数。点击**确定**来运行流水线。 ![运行流水线](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/run_pipeline.png) -2. 要查看流水线的状态,请转到**活动**选项卡,点击您想查看的记录。 +2. 要查看流水线的状态,请转到**运行记录**选项卡,点击您想查看的记录。 3. 稍等片刻,流水线如果成功运行,则会在 **Deploy to Dev** 阶段停止。`project-admin` 作为流水线的审核员,需要进行审批,然后资源才会部署至开发环境。 ![流水线成功运行](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_successful.png) -4. 登出 KubeSphere 控制台,以 `project-admin` 身份重新登录。转到您的 DevOps 工程,点击 `graphical-pipeline` 流水线。在**活动**选项卡下,点击要审核的记录。要批准流水线,请点击**继续**。 +4. 登出 KubeSphere 控制台,以 `project-admin` 身份重新登录。转到您的 DevOps 项目,点击 `graphical-pipeline` 流水线。在**运行记录**选项卡下,点击要审核的记录。要批准流水线,请点击**继续**。 ### 步骤 6:查看流水线详情 -1. 以 `project-regular` 身份重新登录控制台。转到您的 DevOps 工程,点击 `graphical-pipeline` 流水线。在**活动**选项卡下,点击**状态**下标记为**成功**的记录。 +1. 以 `project-regular` 身份重新登录控制台。转到您的 DevOps 项目,点击 `graphical-pipeline` 流水线。在**运行记录**选项卡下,点击**状态**下标记为**成功**的记录。 2. 如果所有配置都成功运行,您可以看到所有阶段都已完成。 - ![完成](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/completed.png) - 3. 
在右上角点击**查看日志**,查看所有日志。点击每个阶段查看其详细日志。您可以根据日志排除故障和问题,也可以将日志下载到本地进行进一步分析。 - ![查看日志](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/inspect_logs.png) - ### 步骤 7:下载制品 点击**制品**选项卡,然后点击右侧的图标下载该制品。 @@ -374,7 +354,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ### 步骤 8:查看代码分析结果 -在**代码质量**页面,可以查看由 SonarQube 提供的本示例流水线的代码分析结果。如果您没有事先配置 SonarQube,则该部分不可用。有关更多信息,请参见[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。 +在**代码检查**页面,可以查看由 SonarQube 提供的本示例流水线的代码分析结果。如果您没有事先配置 SonarQube,则该部分不可用。有关更多信息,请参见[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。 ![SonarQube 详细结果](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/sonarqube_result_detail.png) @@ -384,14 +364,8 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ 2. 前往该项目(本教程中即 `kubesphere-sample-dev`),请点击**应用负载**下的**工作负载**,您可以看到列表中显示的部署。 - ![查看部署](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/view_deployment.png) - 3. 在**服务**页面,您可以看到示例服务通过 NodePort 暴露其端口号。要访问服务,请访问 `:`。 - ![服务暴露](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/service_exposed.png) - - ![访问服务](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/access_service.png) - {{< notice note >}} 访问服务前,您可能需要配置端口转发规则并在安全组中放行该端口。 @@ -402,7 +376,7 @@ KubeSphere 中的图形编辑面板包含用于 Jenkins [阶段 (Stage)](https:/ ![DockerHub 镜像](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/dockerhub_image.png) -5. 该应用的名称为 `devops-sample`,即 `APP_NAME` 的值,标签即 `SNAPSHOT-$BUILD_NUMBER` 的值。`$BUILD_NUMBER` 即**活动**选项卡列示的记录的序列号。 +5. 该应用的名称为 `devops-sample`,即 `APP_NAME` 的值,标签即 `SNAPSHOT-$BUILD_NUMBER` 的值。`$BUILD_NUMBER` 即**运行记录**选项卡列示的记录的序列号。 6. 
如果您在最后一个阶段设置了电子邮件服务器并添加了电子邮件通知的步骤,您还会收到电子邮件消息。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md b/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md index 7d4ae8154..e9e149915 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md +++ b/content/zh/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md @@ -20,7 +20,7 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 - 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户和一个 [GitHub](https://github.com/) 帐户。 - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`),需要邀请该帐户至 DevOps 工程中并赋予 `operator` 角色。如果尚未准备就绪,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),需要邀请该用户至 DevOps 项目中并赋予 `operator` 角色。如果尚未准备就绪,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要设置 CI 专用节点用于运行流水线。请参考[为依赖项缓存设置 CI 节点](../../how-to-use/set-ci-node/)。 - 您需要安装和配置 SonarQube。请参考[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。如果您跳过这一部分,则没有下面的 **SonarQube 分析**阶段。 @@ -35,7 +35,7 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 - **阶段 1:Checkout SCM**:从 GitHub 仓库检出源代码。 - **阶段 2:单元测试**:待该测试通过后才会进行下一阶段。 - **阶段 3:SonarQube 分析**:SonarQube 代码质量分析。 -- **阶段 4:构建并推送快照镜像**:根据**行为策略**中选定的分支来构建镜像,并将 `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` 标签推送至 Docker Hub,其中 `$BUILD_NUMBER` 为流水线活动列表中的运行序号。 +- **阶段 4:构建并推送快照镜像**:根据**策略设置**中选定的分支来构建镜像,并将 `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` 标签推送至 Docker Hub,其中 `$BUILD_NUMBER` 为流水线活动列表中的运行序号。 - **阶段 5:推送最新镜像**:将 SonarQube 分支标记为 `latest`,并推送至 Docker Hub。 - **阶段 6:部署至开发环境**:将 SonarQube 分支部署到开发环境,此阶段需要审核。 - **阶段 7:带标签推送**:生成标签并发布到 GitHub,该标签会推送到 Docker Hub。 @@ -47,7 +47,7 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 ### 步骤 1:创建凭证 -1. 
以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 工程,在**工程管理**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/credential-management/)。 +1. 以 `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/credential-management/)。 {{< notice note >}} @@ -61,9 +61,7 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 | github-id | 帐户凭证 | GitHub | | demo-kubeconfig | kubeconfig | Kubernetes | -2. 您还需要为 SonarQube 创建一个凭证 ID (`sonar-token`),用于上述的阶段 3(SonarQube 分析)。请参考[为新工程创建 SonarQube 令牌 (Token)](../../../devops-user-guide/how-to-integrate/sonarqube/#为新工程创建-sonarqube-token),在下图所示的**密钥**字段中输入令牌。点击**确定**完成操作。 - - ![Sonar 令牌](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.PNG) +2. 您还需要为 SonarQube 创建一个凭证 (`sonar-token`),用于上述的阶段 3(SonarQube 分析)。请参阅[为新项目创建 SonarQube 令牌 (Token)](../../../devops-user-guide/how-to-integrate/sonarqube/#为新项目创建-sonarqube-token),在**访问令牌**类型的凭证的**令牌**字段中输入 SonarQube 令牌。点击**确定**完成操作。 3. 您还需要创建具有如下图所示权限的 GitHub 个人访问令牌 (PAT),然后在 DevOps 项目中,使用生成的令牌创建用于 GitHub 认证的帐户凭证(例如,`github-token`)。 @@ -75,19 +73,13 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 {{}} -4. 您可以在列表中看到已创建的五个凭证。 - - ![credential-list1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list1.png) +4. 您可以在列表页中看到已创建的五个凭证。 ### 步骤 2:在 GitHub 仓库中修改 Jenkinsfile -1. 登录 GitHub 并 Fork GitHub 仓库 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 至您的 GitHub 个人帐户。 +1. 登录 GitHub 并 Fork GitHub 仓库 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 至您的 GitHub 个人帐户。 - ![fork-github-repo1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo1.png) - -2. 
在您自己的 GitHub 仓库 **devops-java-sample** 中,点击根目录中的文件 `Jenkinsfile-online`。 - - ![jenkins-edit--1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit--1.png) +2. 在您自己的 GitHub 仓库 **devops-maven-sample** 中,点击根目录中的文件 `Jenkinsfile-online`。 3. 点击右侧的编辑图标,编辑环境变量。 @@ -95,14 +87,14 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 | 条目 | 值 | 描述信息 | | :--- | :--- | :--- | - | DOCKER\_CREDENTIAL\_ID | dockerhub-id | 您在 KubeSphere 中为 Docker Hub 帐户设置的**凭证 ID**。 | - | GITHUB\_CREDENTIAL\_ID | github-id | 您在 KubeSphere 中为 GitHub 帐户设置的**凭证 ID**,用于将标签推送至您的 GitHub 仓库。 | - | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | 您在 KubeSphere 中为 kubeconfig 设置的**凭证 ID**,用于访问运行中的 Kubernetes 集群。 | + | DOCKER\_CREDENTIAL\_ID | dockerhub-id | 您在 KubeSphere 中为 Docker Hub 帐户设置的**名称**。 | + | GITHUB\_CREDENTIAL\_ID | github-id | 您在 KubeSphere 中为 GitHub 帐户设置的**名称**,用于将标签推送至您的 GitHub 仓库。 | + | KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | 您在 KubeSphere 中为 kubeconfig 设置的**名称**,用于访问运行中的 Kubernetes 集群。 | | REGISTRY | docker.io | 默认为 `docker.io`,用作推送镜像的地址。 | | DOCKERHUB\_NAMESPACE | your-dockerhub-account | 请替换为您的 Docker Hub 帐户名,也可以替换为该帐户下的 Organization 名称。 | | GITHUB\_ACCOUNT | your-github-account | 请替换为您的 GitHub 帐户名。例如,如果您的 GitHub 地址是 `https://github.com/kubesphere/`,则您的 GitHub 帐户名为 `kubesphere`,也可以替换为该帐户下的 Organization 名称。 | - | APP\_NAME | devops-java-sample | 应用名称。 | - | SONAR\_CREDENTIAL\_ID | sonar-token | 您在 KubeSphere 中为 SonarQube 令牌设置的**凭证 ID**,用于代码质量检测。 | + | APP\_NAME | devops-maven-sample | 应用名称。 | + | SONAR\_CREDENTIAL\_ID | sonar-token | 您在 KubeSphere 中为 SonarQube 令牌设置的**名称**,用于代码质量检测。 | {{< notice note >}} @@ -112,130 +104,102 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 4. 
编辑环境变量后,点击页面底部的 **Commit changes**,更新 SonarQube 分支中的文件。 - ![commit-changes1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes1.png) - ### 步骤 3:创建项目 您需要创建两个项目,例如 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,分别代表开发环境和生产环境。待流水线成功运行,将在这两个项目中自动创建应用程序的相关部署 (Deployment) 和服务 (Service)。 {{< notice note >}} -您需要提前创建 `project-admin` 帐户,用作 CI/CD 流水线的审核者。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要提前创建 `project-admin` 帐户,用作 CI/CD 流水线的审核者。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 {{}} -1. 以 `project-admin` 身份登录 KubeSphere。在您创建 DevOps 工程的企业空间中创建以下两个项目。请确保邀请 `project-regular` 帐户至这两个项目中并赋予 `operator` 角色。 +1. 以 `project-admin` 身份登录 KubeSphere。在您创建 DevOps 项目的企业空间中创建以下两个项目。请确保邀请 `project-regular` 帐户至这两个项目中并赋予 `operator` 角色。 | 项目名称 | 别名 | | ---------------------- | ----------------------- | | kubesphere-sample-dev | development environment | | kubesphere-sample-prod | production environment | -2. 项目创建后,会显示在项目列表中,如下所示: - - ![项目列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/project-list.PNG) +2. 项目创建后,会显示在项目列表中。 ### 步骤 4:创建流水线 -1. 登出 KubeSphere,然后以 `project-regular` 身份重新登录,转到 DevOps 工程 `demo-devops`,点击**创建**构建新流水线。 +1. 登出 KubeSphere,然后以 `project-regular` 身份重新登录,转到 DevOps 项目 `demo-devops`,点击**创建**。 - ![创建流水线](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.PNG) +2. 在弹出的对话框中填入基本信息,将其命名为 `jenkinsfile-in-scm` 并在**代码仓库**下指定一个代码仓库。 -2. 在弹出对话框中填入基本信息,将其命名为 `jenkinsfile-in-scm` 并选择一个代码仓库。 +3. 在 **GitHub** 选项卡,从**凭证**的下拉菜单中选择 **github-token**,然后点击**确定**来选择您的仓库。 - ![创建流水线-2](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.PNG) +4. 选择您的 GitHub 帐户,与该令牌相关的所有仓库将在右侧列出。选择 **devops-maven-sample** 并点击**选择**,点击**下一步**继续。 -3. 在 **GitHub** 选项卡,从下拉菜单中选择 **github-token**,然后点击**确认**来选择您的仓库。 +5. 
在**高级设置**中,选中**删除旧分支**旁边的方框。本教程中,您可以为**分支保留天数(天)**和**分支最大数量**使用默认值。 - ![select-token1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select-token1.png) + 删除旧分支意味着您将一并丢弃分支记录。分支记录包括控制台输出、已归档制品以及特定分支的其他相关元数据。更少的分支意味着您可以节省 Jenkins 正在使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧分支: -4. 选择您的 GitHub 帐户,与该令牌相关的所有仓库将在右侧列出。选择 **devops-java-sample** 并点击**选择此仓库**,点击**下一步**继续。 + - 分支保留天数(天):超过保留期限的分支将被删除。 - ![选择仓库](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select_repo.png) - -5. 在**高级设置**中,选中**丢弃旧的分支**旁边的方框。本教程中,您可以为**保留分支的天数**和**保留分支的最大个数**使用默认值。 - - ![分支设置](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.PNG) - - 丢弃旧的分支意味着您将一并丢弃分支记录。分支记录包括控制台输出、已归档制品以及特定分支的其他相关元数据。更少的分支意味着您可以节省 Jenkins 正在使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧分支: - - - 保留分支的天数:在一定天数之后,丢弃分支。 - - - 保留分支的最大个数:分支达到一定数量后,丢弃最旧的分支。 + - 分支最大数量:分支数量超过最大数量时,删除最旧的分支。 {{< notice note >}} - **保留分支的天数**和**保留分支的最大个数**可以同时应用于分支。只要某个分支满足其中一个字段所设置的条件,则会丢弃该分支。例如,如果您将保留天数和最大分支数分别指定为 2 和 3,待某个分支的保留天数超过 2 或者分支保留数量超过 3,则会丢弃该分支。KubeSphere 默认用 -1 预填充这两个字段,表示已删除的分支将被丢弃。 + **分支保留天数(天)**和**分支最大数量**可以同时应用于分支。只要某个分支满足其中一个字段所设置的条件,则会删除该分支。例如,如果您将保留天数和最大分支数分别指定为 2 和 3,待某个分支的保留天数超过 2 或者分支保留数量超过 3,则会删除该分支。KubeSphere 默认用 7 和 5 预填充这两个字段。 {{}} -6. 在**行为策略**中,KubeSphere 默认提供四种策略。本示例中不会使用**从 Fork 仓库中发现 PR** 这条策略,因此您可以删除该策略。您无需修改设置,可以直接使用默认值。 - - ![remove-behavioral-strategy1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy1.png) +6. 
在**策略设置**中,KubeSphere 默认提供四种策略。本示例中不会使用**从 Fork 仓库中发现 PR** 这条策略,因此您可以删除该策略。您无需修改设置,可以直接使用默认值。 Jenkins 流水线运行时,开发者提交的 Pull Request (PR) 也将被视为一个单独的分支。 **发现分支** - - **排除也作为 PR 提交的分支**:不扫描源分支,例如源仓库的 master 分支。需要合并这些分支。 - - **只有被提交为 PR 的分支**:仅扫描 PR 分支。 - - **所有分支**:拉取源仓库中的所有分支。 + - **排除已提交 PR 的分支**:不扫描源分支,例如源仓库的 master 分支。需要合并这些分支。 + - **只包括已提交 PR 的分支**:仅扫描 PR 分支。 + - **包括所有分支**:拉取源仓库中的所有分支。 - **从原仓库中发现 PR** + **从原仓库发现 PR** - - **PR 与目标分支合并后的源代码版本**:PR 合并到目标分支后,基于源代码创建并运行流水线。 - - **PR 本身的源代码版本**:根据 PR 本身的源代码创建并运行流水线。 - - **发现 PR 时会创建两个流水线**:KubeSphere 创建两个流水线,一个流水线使用 PR 与目标分支合并后的源代码版本,另一个使用 PR 本身的源代码版本。 + - **拉取 PR 合并后的代码**:PR 合并到目标分支后,基于源代码创建并运行流水线。 + - **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 + - **分别创建两个流水线**:KubeSphere 创建两个流水线,一个流水线使用 PR 与目标分支合并后的源代码版本,另一个使用 PR 本身的源代码版本。 {{< notice note >}} - 您需要选择 GitHub 作为代码仓库才能启用此处的**行为策略**设置。 + 您需要选择 GitHub 作为代码仓库才能启用此处的**策略设置**设置。 {{}} 7. 向下滚动到**脚本路径**。该字段指定代码仓库中的 Jenkinsfile 路径。它表示仓库的根目录。如果文件位置变更,则脚本路径也需要更改。请将其更改为 `Jenkinsfile-online`,这是示例仓库中位于根目录下的 Jenkinsfile 的文件名。 - ![Jenkinsfile-online](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.PNG) - -8. 在**扫描 Repo Trigger** 中,点击**如果没有扫描触发,则定期扫描**并设置时间间隔为 **5 分钟**。点击**创建**完成配置。 - - ![高级设置](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/advanced-settings.PNG) +8. 在**扫描触发器**中,点击**定时扫描**并设置时间间隔为 **5 分钟**。点击**创建**完成配置。 {{< notice note >}} - 您可以设置特定的时间间隔让流水线扫描远程仓库,以便根据您在**行为策略**中设置的策略来检测代码更新或新的 PR。 + 您可以设置特定的时间间隔让流水线扫描远程仓库,以便根据您在**策略设置**中设置的策略来检测代码更新或新的 PR。 {{}} ### 步骤 5:运行流水线 -1. 流水线创建后,将显示在下图所示的列表中。点击该流水线进入其详情页面。 - - ![流水线列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.PNG) +1. 
流水线创建后,点击该流水线名称进入其详情页面。 {{< notice note >}} - - 您可以点击该流水线右侧的三个点,然后选择**复制流水线**来创建该流水线的副本。如需并发运行不包含多分支的多个流水线,您可以将这些流水线全选,然后点击**运行**来批量运行它们。 + - 您可以点击该流水线右侧的 ,然后选择**复制**来创建该流水线的副本。如需并发运行不包含多分支的多个流水线,您可以将这些流水线全选,然后点击**运行**来批量运行它们。 - 流水线详情页显示**同步状态**,即 KubeSphere 和 Jenkins 的同步结果。若同步成功,您会看到**成功**图标中打上绿色的对号。 {{}} -2. 在**活动**选项卡下,正在扫描三个分支。点击右侧的**运行**,流水线将根据您设置的行为策略来运行。从下拉列表中选择 **sonarqube**,然后添加标签号,例如 `v0.0.2`。点击**确定**触发新活动。 - - ![流水线详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.PNG) - - ![标签名称](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.PNG) +2. 在**运行记录**选项卡下,正在扫描三个分支。点击右侧的**运行**,流水线将根据您设置的行为策略来运行。从下拉列表中选择 **sonarqube**,然后添加标签号,例如 `v0.0.2`。点击**确定**触发新活动。 {{< notice note >}} - - 如果您在此页面上未看到任何活动,则需要手动刷新浏览器或点击下拉菜单(**更多操作**按钮)中的**扫描远程分支**。 + - 如果您在此页面上未看到任何运行记录,则需要手动刷新浏览器或点击下拉菜单(**更多操作**按钮)中的**扫描远程分支**。 - 标签名称用于在 GitHub 和 Docker Hub 中指代新生成的发布版本和镜像。现有标签名称不能再次用于字段 `TAG_NAME`。否则,流水线将无法成功运行。 {{}} -3. 稍等片刻,您会看到一些活动停止,一些活动失败。点击第一个活动查看其详细信息。 - - ![活动失败](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/activity-failure.PNG) +3. 稍等片刻,您会看到一些运行停止,一些运行失败。点击第一个活动查看其详细信息。 {{< notice note >}} @@ -245,8 +209,6 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 4. 
流水线在 `deploy to dev` 阶段暂停,您需要手动点击**继续**。请注意,在 Jenkinsfile 中分别定义了三个阶段 `deploy to dev`、`push with tag` 和 `deploy to production`,因此将对流水线进行三次审核。 - ![流水线继续](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.PNG) - 在开发或生产环境中,可能需要具有更高权限的人员(例如版本管理员)来审核流水线、镜像以及代码分析结果。他们有权决定流水线是否能进入下一阶段。在 Jenkinsfile 中,您可以使用 `input` 来指定由谁审核流水线。如果您想指定一个用户(例如 `project-admin`)来审核,您可以在 Jenkinsfile 中添加一个字段。如果有多个用户,则需要通过逗号进行分隔,如下所示: ```groovy @@ -257,36 +219,24 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 {{< notice note >}} - 在 KubeSphere 3.1 中,如果不指定审核员,那么能够运行流水线的帐户也能够继续或终止该流水线。流水线创建者、在该工程中具有 `admin` 角色的帐户或者您指定的帐户也有权限继续或终止流水线。 + 在 KubeSphere 3.2.x 中,如果不指定审核员,那么能够运行流水线的帐户也能够继续或终止该流水线。流水线创建者、在该项目中具有 `admin` 角色的用户或者您指定的帐户也有权限继续或终止流水线。 {{}} ### 步骤 6:检查流水线状态 -1. 在**运行状态**中,您可以查看流水线的运行状态。请注意,流水线在刚创建后将继续初始化几分钟。示例流水线有八个阶段,它们已在 [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online) 中单独定义。 - - ![查看流水线日志-1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.PNG) +1. 在**运行状态**中,您可以查看流水线的运行状态。请注意,流水线在刚创建后将继续初始化几分钟。示例流水线有八个阶段,它们已在 [Jenkinsfile-online](https://github.com/kubesphere/devops-maven-sample/blob/sonarqube/Jenkinsfile-online) 中单独定义。 2. 点击右上角的**查看日志**来查看流水线运行日志。您可以看到流水线的动态日志输出,包括可能导致流水线无法运行的错误。对于每个阶段,您都可以点击该阶段来查看其日志,而且可以将日志下载到本地计算机进行进一步分析。 - ![查看流水线日志-2](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.PNG) - ### 步骤 7:验证结果 -1. 流水线成功运行后,点击**代码质量**通过 SonarQube 查看结果,如下所示。 - - ![SonarQube 结果详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail-1.PNG) - - ![SonarQube 结果详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.PNG) +1. 流水线成功运行后,点击**代码检查**通过 SonarQube 查看结果,如下所示。 2. 
按照 Jenkinsfile 中的定义,通过流水线构建的 Docker 镜像也已成功推送到 Docker Hub。在 Docker Hub 中,您会看到带有标签 `v0.0.2` 的镜像,该标签在流水线运行之前已指定。 - ![Docker Hub 镜像](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.PNG) - 3. 同时,GitHub 中已生成一个新标签和一个新发布版本。 - ![GitHub 结果](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-result.PNG) - 4. 示例应用程序将部署到 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,并创建相应的部署和服务。转到这两个项目,预期结果如下所示: | 环境 | URL | 命名空间 | 部署 | 服务 | @@ -294,14 +244,6 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 | Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | | Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | - #### 部署 - - ![流水线部署](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.PNG) - - #### 服务 - - ![流水线服务](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.PNG) - {{< notice note >}} 您可能需要在您的安全组中放行该端口,以便通过 URL 访问应用程序。 @@ -310,13 +252,9 @@ KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍 ### 步骤 8:访问示例服务 -1. 以 `admin` 身份登录 KubeSphere 并使用**工具箱**中的 **Web Kubectl** 访问该服务。转到 `kubesphere-sample-dev` 项目,然后在**应用负载**下的**服务**中选择 `ks-sample-dev`。Endpoint 可用于访问该服务。 +1. 以 `admin` 身份登录 KubeSphere 并使用**工具箱**中的 **kubectl** 访问该服务。转到 `kubesphere-sample-dev` 项目,然后在**应用负载**下的**服务**中点击 `ks-sample-dev`。在详情页获取 Endpoint 用于访问该服务。 - ![查看示例应用](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sample-app-result-check.PNG) - - ![访问 Endpoint](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.PNG) - -2. 在右下角的**工具箱**中使用 **Web Kubectl** 执行以下命令: +2. 
在右下角的**工具箱**中使用 **kubectl** 执行以下命令: ```bash curl 10.233.120.230:8080 diff --git a/content/zh/docs/devops-user-guide/how-to-use/credential-management.md b/content/zh/docs/devops-user-guide/how-to-use/credential-management.md index 98edb425f..fda9e5788 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/credential-management.md +++ b/content/zh/docs/devops-user-guide/how-to-use/credential-management.md @@ -8,47 +8,41 @@ weight: 11230 凭证是包含敏感信息的对象,例如用户名和密码、SSH 密钥和令牌 (Token)。当 KubeSphere DevOps 流水线运行时,会与外部环境中的对象进行交互,以执行一系列任务,包括拉取代码、推送和拉取镜像以及运行脚本等。此过程中需要提供相应的凭证,而这些凭证不会明文出现在流水线中。 -具有必要权限的 DevOps 工程用户可以为 Jenkins 流水线配置凭证。用户在 DevOps 工程中添加或配置这些凭证后,便可以在 DevOps 工程中使用这些凭证与第三方应用程序进行交互。 +具有必要权限的 DevOps 项目用户可以为 Jenkins 流水线配置凭证。用户在 DevOps 项目中添加或配置这些凭证后,便可以在 DevOps 项目中使用这些凭证与第三方应用程序进行交互。 -目前,您可以在 DevOps 工程中存储以下 4 种类型的凭证: +目前,您可以在 DevOps 项目中创建以下 4 种类型的凭证: -![创建凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential_page.png) - -- **帐户凭证**:用户名和密码,可以作为单独的组件处理,或者作为用冒号分隔的字符串(格式为 `username:password`)处理,例如 GitHub、GitLab 和 Docker Hub 的帐户。 -- **SSH**:带有私钥的用户名,SSH 公钥/私钥对。 -- **秘密文本**:文件中的秘密内容。 +- **用户名和密码**:用户名和密码,可以作为单独的组件处理,或者作为用冒号分隔的字符串(格式为 `username:password`)处理,例如 GitHub、GitLab 和 Docker Hub 的帐户。 +- **SSH 密钥**:带有私钥的用户名,SSH 公钥/私钥对。 +- **访问令牌**:具有访问权限的令牌。 - **kubeconfig**:用于配置跨集群认证。如果选择此类型,将自动获取当前 Kubernetes 集群的 kubeconfig 文件内容,并自动填充在当前页面对话框中。 -本教程演示如何在 DevOps 工程中创建和管理凭证。有关如何使用凭证的更多信息,请参见[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)和[使用图形编辑面板创建流水线](../create-a-pipeline-using-graphical-editing-panel/)。 +本教程演示如何在 DevOps 项目中创建和管理凭证。有关如何使用凭证的更多信息,请参见[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)和[使用图形编辑面板创建流水线](../create-a-pipeline-using-graphical-editing-panel/)。 ## 准备工作 - 您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 您需要有一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`),并已邀请此帐户至 DevOps 工程中且授予 `operator` 角色。如果尚未准备好,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 
您需要有一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`),并已邀请此帐户至 DevOps 项目中且授予 `operator` 角色。如果尚未准备好,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建凭证 -以 `project-regular` 身份登录 KubeSphere 控制台。进入您的 DevOps 工程,选择**凭证**,然后点击**创建**。 - -![点击创建](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-step1.PNG) +以 `project-regular` 身份登录 KubeSphere 控制台。进入您的 DevOps 项目,选择**凭证**,然后点击**创建**。 ### 创建 Docker Hub 凭证 -1. 在弹出对话框中输入以下信息。 +1. 在弹出的对话框中输入以下信息。 - ![DockerHub 凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/dockerhub_credentials.png) - - - **凭证 ID**:设置可以在流水线中使用的 ID,例如 `dockerhub-id`。 - - **类型**:选择**帐户凭证**。 + - **名称**:设置可以在流水线中使用的 ID,例如 `dockerhub-id`。 + - **类型**:选择**用户名和密码**。 - **用户名**:您的 Docker Hub 帐户(即 Docker ID)。 - - **token / 密码**:您的 Docker Hub 密码。 + - **密码/令牌**:您的 Docker Hub 密码。 - **描述信息**:凭证的简介。 2. 完成操作后点击**确定**。 ### 创建 GitHub 凭证 -同样地,按照上述相同步骤创建 GitHub 凭证。设置不同的**凭证 ID**(例如 `github-id`),**类型**同样选择**帐户凭证**。分别在**用户名**和 **token / 密码**中输入您的 GitHub 用户名和密码。 +同样地,按照上述相同步骤创建 GitHub 凭证。设置不同的**名称**(例如 `github-id`),**类型**同样选择**用户名和密码**。分别在**用户名**和 **密码/令牌**中输入您的 GitHub 用户名和密码。 {{< notice note >}} @@ -68,18 +62,12 @@ weight: 11230 ## 查看和管理凭证 -1. 凭证创建后,会在列表中显示,如下所示。 - - ![凭证列表](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential_list.png) +1. 凭证创建后,会在列表中显示。 2. 点击任意一个凭证,进入其详情页面,您可以查看帐户详情和与此凭证相关的所有事件。 - ![凭证详情页面](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-detail_page.png) - 3. 
您也可以在此页面上编辑或删除凭证。请注意,编辑凭证时,KubeSphere 不会显示现有用户名或密码信息。如果输入新的用户名和密码,则前一个将被覆盖。 - ![编辑凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/edit_credentials.png) - ## 另请参见 [使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/) diff --git a/content/zh/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md b/content/zh/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md index e00f409ed..10da18c7c 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md +++ b/content/zh/docs/devops-user-guide/how-to-use/gitlab-multibranch-pipeline.md @@ -8,19 +8,19 @@ weight: 11291 [GitLab](https://about.gitlab.com/) 是一个提供公开和私有仓库的开源代码仓库平台。它也是一个完整的 DevOps 平台,专业人士能够使用 GitLab 在项目中执行任务。 -在 KubeSphere v3.1 中,您可以使用 GitLab 在 DevOps 工程中创建多分支流水线。本教程介绍如何使用 GitLab 创建多分支流水线。 +在 KubeSphere 3.1.x 以及更新版本中,您可以使用 GitLab 在 DevOps 项目中创建多分支流水线。本教程介绍如何使用 GitLab 创建多分支流水线。 ## 准备工作 - 您需要准备一个 [GitLab](https://gitlab.com/users/sign_in) 帐户以及一个 [Docker Hub](https://hub.docker.com/) 帐户。 - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 您需要创建一个企业空间、一个 DevOps 项目以及一个帐户 (`project-regular`),该帐户必须被邀请至该 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目以及一个用户 (`project-regular`),该用户必须被邀请至该 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 ### 步骤 1:创建凭证 -1. 使用 `project-regular` 用户登录 KubeSphere 控制台。转到您的 DevOps 工程,在**工程管理**下的**凭证**中创建以下凭证。有关更多如何创建凭证的信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/credential-management/)。 +1. 使用 `project-regular` 用户登录 KubeSphere 控制台。转到您的 DevOps 项目,在 **DevOps 项目设置**下的**凭证**中创建以下凭证。有关更多如何创建凭证的信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/credential-management/)。 {{< notice note >}} @@ -36,29 +36,19 @@ weight: 11291 2. 
创建完成后,您可以在列表中看到创建的凭证。 - ![credential-created](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/credential-created.png) - ### 步骤 2:在 GitLab 仓库中编辑 Jenkinsfile -1. 登录 GitLab 并创建一个公开项目。点击**导入项目**,选择**从 URL 导入仓库**,然后输入 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 的 URL。可见性级别选择**公开**,然后点击**新建项目**。 - - ![click-import-project](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-import-project.png) - - ![use-git-url](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/use-git-url.png) +1. 登录 GitLab 并创建一个公开项目。点击**导入项目**,选择**从 URL 导入仓库**,然后输入 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 的 URL。可见性级别选择**公开**,然后点击**新建项目**。 2. 在刚刚创建的项目中,从 master 分支中创建一个新分支,命名为 `gitlab-demo`。 - ![new-branch](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/new-branch.png) - 3. 在 `gitlab-demo` 分支中,点击根目录中的 `Jenkinsfile-online` 文件。 - ![gitlab-demo](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-demo.png) - 4. 点击**编辑**,分别将 `GITHUB_CREDENTIAL_ID`、`GITHUB_ACCOUNT` 以及 `@github.com` 更改为 `GITLAB_CREDENTIAL_ID`、`GITLAB_ACCOUNT` 以及 `@gitlab.com`,然后编辑下表所列条目。您还需要将 `push latest` 和 `deploy to dev` 中 `branch` 的值更改为 `gitlab-demo`。 | 条目 | 值 | 描述信息 | | -------------------- | --------- | ------------------------------------------------------------ | - | GITLAB_CREDENTIAL_ID | gitlab-id | 您在 KubeSphere 中为自己的 GitLab 帐户设置的**凭证 ID**,用于推送标签至您的 GitLab 仓库。 | + | GITLAB_CREDENTIAL_ID | gitlab-id | 您在 KubeSphere 中为自己的 GitLab 帐户设置的**名称**,用于推送标签至您的 GitLab 仓库。 | | DOCKERHUB_NAMESPACE | felixnoo | 请将其替换为您自己的 Docker Hub 帐户名称,也可以使用该帐户下的组织名称。 | | GITLAB_ACCOUNT | felixnoo | 请将其替换为您自己的 GitLab 帐户名称,也可以使用该帐户的用户组名称。 | @@ -70,34 +60,30 @@ weight: 11291 5. 
点击 **Commit changes** 更新该文件。 - ![commit-changes](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/commit-changes.png) - ### 步骤 3:创建项目 您需要创建两个项目,例如 `kubesphere-sample-dev` 和 `kubesphere-sample-prod`,这两个项目分别代表开发环境和测试环境。有关更多信息,请参考[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/#步骤-3创建项目)。 ### 步骤 4:创建流水线 -1. 使用 `project-regular` 用户登录 KubeSphere Web 控制台。转到您的 DevOps 工程,点击**创建**来创建新流水线。 +1. 使用 `project-regular` 用户登录 KubeSphere Web 控制台。转到您的 DevOps 项目,点击**创建**来创建新流水线。 2. 在出现的对话框中填写基本信息。将流水线的名称设置为 `gitlab-multi-branch` 并选择一个代码仓库。 - ![create-pipeline](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/create-pipeline.png) - -3. 在 **GitLab** 选项卡下的 **GitLab 服务**中选择默认选项 `https://gitlab.com`,在**项目所属组**中输入该 GitLab 项目所属组的名称,然后从**仓库名称**的下拉菜单中选择 `devops-java-sample` 仓库。点击右下角的对号图标,然后点击**下一步**。 - - ![select-gitlab](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-gitlab.png) +3. 在 **GitLab** 选项卡下的 **GitLab 服务器地址**中选择默认选项 `https://gitlab.com`,在**项目组/所有者**中输入该 GitLab 项目所属组的名称,然后从**代码仓库**的下拉菜单中选择 `devops-maven-sample` 仓库。点击右下角的 **√**,然后点击**下一步**。 {{< notice note >}} - 如需使用 GitLab 私有仓库,则须在 GitLab 上创建拥有 API 和 read_repository 权限的个人访问令牌,在 Jenkins 面板上创建访问 GitLab 的凭证,然后在**系统配置**下的 **GitLab 服务**中添加该凭证。有关如何登录 Jenkins 的更多信息,请参考 [Jenkins 系统设置](../jenkins-setting/#登录-jenkins-重新加载配置)。 + 如需使用 GitLab 私有仓库,请参考以下步骤: + + - 在 GitLab 上前往**用户设置 > 访问令牌**,创建拥有 API 和 read_repository 权限的个人访问令牌。 + - [登录 Jenkins 面板](../../how-to-integrate/sonarqube/#步骤-5将-sonarqube-服务器添加至-jenkins),前往**系统管理 > Manage Credentials**,使用您的 GitLab 令牌创建 Jenkins 凭证,用于访问 GitLab。然后前往**系统管理 > 系统配置**,在 **GitLab 服务**中添加该凭证。 + - 在您的 DevOps 项目中,选择 **DevOps 项目设置 > 凭证**,使用您的 GitLab 令牌创建一个凭证。然后在创建流水线时,您需要在 **GitLab** 页签上的**凭证**中指定该凭证,以便流水线能够从您的 GitLab 私有仓库中拉取代码。 {{}} 4. 
在**高级设置**选项卡中,下滑到**脚本路径**。将其更改为 `Jenkinsfile-online` 然后点击**创建**。 - ![jenkinsfile-online](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png) - {{< notice note >}} 该字段指定代码仓库中的 Jenkinsfile 路径,它表示该仓库的根目录。如果文件位置变更,则脚本路径也需要更改。 @@ -106,13 +92,9 @@ weight: 11291 ### 步骤 5:运行流水线 -1. 流水线创建后,会展示在列表中。点击流水线查看其详情页。 +1. 流水线创建后,会展示在列表中。点击流水线名称查看其详情页。 -2. 点击右侧的**运行**。在出现的对话框中,从下拉菜单中选择 **gitlab-demo** 并添加一个标签号,比如 `v0.0.2`。点击**确定**来触发一个新活动。 - - ![click-run](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-run.png) - - ![select-branch](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-branch.png) +2. 点击右侧的**运行**。在出现的对话框中,从下拉菜单中选择 **gitlab-demo** 并添加一个标签号,比如 `v0.0.2`。点击**确定**来触发一个新运行。 {{< notice note >}} @@ -124,22 +106,14 @@ weight: 11291 1. 在**运行状态**选项卡,您可以看到流水线的运行过程。点击右上角的**查看日志**来查看流水线运行日志。 - ![check-log](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/check-log.png) - 2. 您可以看到流水线的动态日志输出,包括可能导致流水线无法运行的错误。对于每个阶段,您都可以点击该阶段来查看日志,而且可以将日志下载到本地计算机进行进一步分析。 - ![pipeline-logs](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/pipeline-logs.png) - ### 步骤 7:验证结果 1. 如在 Jenkinsfile 中定义的那样,通过流水线构建的 Docker 镜像已成功推送到 Docker Hub。在 Docker Hub 中,您将看到在流水线运行前指定的带有标签 `v0.0.2` 的镜像。 - ![docker-image](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/docker-image.png) - 2. 同时,GitLab 中也已生成一个新标签。 - ![gitlab-result](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-result.png) - 3. 
示例应用程序将会被部署至 `kubesphere-sample-dev` 和 `kubesphere-sample-prod` 中,也会创建相应的部署和服务。 | 环境 | URL | 命名空间 | 部署 | 服务 | @@ -147,10 +121,6 @@ weight: 11291 | 开发环境 | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev | | 生产环境 | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample | - ![deployment](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/deployment.png) - - ![service](/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/service.png) - {{< notice note >}} 您可能需要在安全组中打开端口,以便使用 URL 访问该应用。有关更多信息,请参考[访问示例服务](../create-a-pipeline-using-jenkinsfile/#步骤-8访问示例服务)。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/jenkins-email.md b/content/zh/docs/devops-user-guide/how-to-use/jenkins-email.md index a2bf4ddb8..2a9aef731 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/jenkins-email.md +++ b/content/zh/docs/devops-user-guide/how-to-use/jenkins-email.md @@ -12,23 +12,21 @@ Weight: 11260 ## 准备工作 - 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建具有该权限的新角色并将该角色分配给一个帐户。 +- 您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建具有该权限的新角色并将该角色分配给一个用户。 ## 设置电子邮件服务器 1. 点击左上角的**平台管理**,然后选择**集群管理**。 -2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入 Member 集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 +2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入成员集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 -3. 转到**应用负载**下的**工作负载**,然后从下拉列表中选择 **kubesphere-devops-system** 项目。点击 **ks-jenkins** 右侧的 以编辑其 YAML 配置文件。 +3. 转到**应用负载**下的**工作负载**,然后从下拉列表中选择 **kubesphere-devops-system** 项目。点击 `devops-jenkins` 右侧的 并选择**编辑 YAML** 以编辑其 YAML 配置文件。 - ![工作负载列表](/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/workloads_list.png) - -4. 向下滚动到下图所示的需要指定的字段。完成修改后,点击**更新**以保存。 +4. 
向下滚动到下图所示的需要指定的字段。完成修改后,点击**确定**以保存。 {{< notice warning >}} - 在 `ks-jenkins` 部署 (Deployment) 中修改电子邮件服务器后,它会重新启动。因此,DevOps 系统将在几分钟内不可用,请在适当的时候进行此类修改。 + 在 `devops-jenkins` 部署 (Deployment) 中修改电子邮件服务器后,它会重新启动。因此,DevOps 系统将在几分钟内不可用,请在适当的时候进行此类修改。 {{}} diff --git a/content/zh/docs/devops-user-guide/how-to-use/jenkins-setting.md b/content/zh/docs/devops-user-guide/how-to-use/jenkins-setting.md index 4bc6c0135..19cf0675c 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/jenkins-setting.md +++ b/content/zh/docs/devops-user-guide/how-to-use/jenkins-setting.md @@ -30,59 +30,15 @@ KubeSphere 默认安装 Jenkins Configuration as Code 插件,您可以通过 Y 1. 以 `admin` 身份登录 KubeSphere,点击左上角的**平台管理**,然后选择**集群管理**。 -2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入 Member 集群,您可以选择一个特定集群来编辑 ConfigMap。如果您尚未启用多集群功能,请直接参考下一步。 +2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入成员集群,您可以选择一个特定集群来编辑 ConfigMap。如果您尚未启用多集群功能,请直接参考下一步。 -3. 在左侧导航栏中选择**配置中心**下的**配置**。在**配置**页面上,从下拉列表中选择 `kubesphere-devops-system`,然后点击 `jenkins-casc-config`。 +3. 在左侧导航栏中选择**配置**下的**配置字典**。在**配置字典**页面上,从下拉列表中选择 `kubesphere-devops-system`,然后点击 `jenkins-casc-config`。 - ![编辑 ConfigMap](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-configmap.png) +4. 在详情页面上,点击**更多操作**,在下拉列表中选择**编辑 YAML**。 -4. 在详情页面上,点击**更多操作**,在下拉列表中选择**编辑配置文件**。 +5. `jenkins-casc-config` 的配置模板是一个 YAML 文件,位于 `data.jenkins_user.yaml:` 部分。您可以在 ConfigMap 的代理 (Kubernetes Jenkins Agent) 中修改容器镜像、标签、资源请求 (Request) 和限制 (Limit) 等内容,或者在 podTemplate 中添加容器。完成操作后,点击**确定**。 - ![more-list](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/more-list.png) - -5. 
`jenkins-casc-config` 的配置模板是一个 YAML 文件,如下图所示。您可以在 ConfigMap 的代理 (Kubernetes Jenkins Agent) 中修改容器镜像、标签、资源请求 (Request) 和限制 (Limit) 等内容,或者在 podTemplate 中添加容器。完成操作后,点击**更新**。 - - ![编辑 Jenkins](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-jenkins.png) - -## 登录 Jenkins 重新加载配置 - -修改 `jenkins-casc-config` 后,您需要在 Jenkins 仪表板的 **Configuration as Code** 页面上重新加载更新后的系统配置。这是因为直接通过 Jenkins 仪表板配置的系统设置可能在 Jenkins 重新调度之后被 CasC 配置覆盖。 - -1. 执行以下命令获取 Jenkins 的地址。 - - ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) - export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - ``` - -2. 您可以看到如下所示的预期输出,获取 Jenkins 的 IP 地址和端口号。 - - ```bash - http://10.77.1.201:30180 - ``` - -3. 使用地址 `http://Node IP:Port Number` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。Jenkins 还配置有 KubeSphere LDAP,这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。 - - ![Jenkins 仪表板](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/jenkins-dashboard.png) - - {{< notice note >}} - - 取决于您的实例的部署位置,您可能需要设置必要的端口转发规则并在您的安全组中放行端口 `30180`,以便访问 Jenkins。 - - {{}} - -4. 登录仪表板后,点击导航栏中的**系统管理**。 - - ![manage-jenkins](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/manage-jenkins.png) - -5. 向下翻页并点击 **Configuration as Code**. - - ![configuration-as-code](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/configuration-as-code.png) - -6. 要重新加载 ConfigMap 中已修改的配置,请点击**应用新配置**。 - - ![应用配置](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/apply-config.png) +6. 请至少等待 70 秒,您的改动会自动重新加载。 7. 
有关如何通过 CasC 设置 Jenkins 的更多信息,请参见 [Jenkins 文档](https://github.com/jenkinsci/configuration-as-code-plugin)。 @@ -90,4 +46,5 @@ KubeSphere 默认安装 Jenkins Configuration as Code 插件,您可以通过 Y 在当前版本中,并非所有插件都支持 CasC 设置。CasC 仅会覆盖通过 CasC 设置的插件配置。 - {{}} \ No newline at end of file + {{}} + diff --git a/content/zh/docs/devops-user-guide/how-to-use/jenkins-shared-library.md b/content/zh/docs/devops-user-guide/how-to-use/jenkins-shared-library.md index dcb870b1a..d4b324e1d 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/jenkins-shared-library.md +++ b/content/zh/docs/devops-user-guide/how-to-use/jenkins-shared-library.md @@ -13,21 +13,17 @@ weight: 11292 ## 准备工作 - [启用 KubeSphere DevOps 系统](https://kubesphere.io/zh/docs/pluggable-components/devops/)。 -- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`)。必须邀请此帐户至 DevOps 工程中,并且授予 `operator` 角色。有关详细信息,请参阅[创建企业空间、项目、帐户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目和一个用户 (`project-regular`)。必须邀请此帐户至 DevOps 项目中,并且授予 `operator` 角色。有关详细信息,请参阅[创建企业空间、项目、用户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。 - 您需要一个可用 Jenkins 共享库。本教程以 [GitHub 仓库](https://github.com/devops-ws/jenkins-shared-library)中的 Jenkins 共享库为例。 ## 在 Jenkins 仪表盘配置共享库 -1. [登录 Jenkins 仪表板](https://kubesphere.io/zh/docs/devops-user-guide/how-to-use/jenkins-setting/#log-in-to-jenkins-to-reload-configurations)并点击左侧导航栏中的**系统管理**。 +1. [登录 Jenkins 仪表板](../../how-to-integrate/sonarqube/#步骤-5将-sonarqube-服务器添加至-jenkins)并点击左侧导航栏中的**系统管理**。 2. 向下滚动并点击**系统配置**。 - ![click_configure](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-configure.png) - 3. 向下滚动到 **Global Pipeline Libraries**,然后点击**新增**。 - ![click-add](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-add.png) - 4. 
配置字段如下所示。 - **Name:** 为共享库设置名称(例如,``demo-shared-library``),以便在 Jenkinsfile 中引用此名称来导入共享库。 @@ -38,13 +34,11 @@ weight: 11292 - 在 **Source Code Management** 下,选择 **Git** 并为**项目仓库**输入示例仓库的 URL 。如果您使用自己的仓库且访问此仓库需要凭据,则需要配置**凭据**。 - ![configure-shared-library](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/configure-shared-library.png) - 5. 当您结束编辑,请点击**应用**。 {{< notice note >}} - 您还可以配置[#文件夹级别的共享库](https://www.jenkins.io/zh/doc/book/pipeline/shared-libraries/#folder-level-shared-libraries)。 + 您还可以配置[文件夹级别的共享库](https://www.jenkins.io/zh/doc/book/pipeline/shared-libraries/#folder-level-shared-libraries)。 {{}} @@ -53,22 +47,16 @@ weight: 11292 ### 步骤 1: 创建流水线 -1. 用 `project-regular` 帐户登录 KubeSphere web 控制台。进入 DevOps 工程并点击**流水线**页面上的**创建**。 +1. 用 `project-regular` 帐户登录 KubeSphere web 控制台。进入 DevOps 项目并点击**流水线**页面上的**创建**。 2. 在弹出窗口中设置名称(例如,``demo-shared-library``),点击**下一步**。 - ![set-name](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/set-name.png) - 3. 在**高级设置**中,直接点击**创建**,使用默认设置创建流水线。 - ![click-create](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-create.png) - ### 步骤 2:编辑流水线 1. 在流水线列表中,点击流水线以转到其详细信息页面,然后点击**编辑 Jenkinsfile**。 - ![edit-jenkinsfile](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/edit-jenkinsfile.png) - 2. 在显示的对话框中,输入以下示例 Jenkinsfile。完成编辑后,点击**确定**。 ```groovy @@ -127,14 +115,8 @@ weight: 11292 ### 步骤 3:运行流水线 -1. 您可以在**流水线**选项卡下查看该阶段。点击**运行**运行它。 - - ![click-run](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-run.png) +1. 您可以在**任务状态**选项卡下查看该阶段。点击**运行**运行它。 2. 在一段时间后,流水线将成功运行。 - ![run-successfully](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/run-successfully.png) - -3. 您可以点击**状态**下的**成功**记录,然后点击**查看日志**查看日志详细信息。 - - ![log-details](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/log-details.png) +3. 
您可以点击**运行记录**下的**成功**记录,然后点击**查看日志**查看日志详细信息。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/pipeline-settings.md b/content/zh/docs/devops-user-guide/how-to-use/pipeline-settings.md index cc8459b71..c539301cf 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/pipeline-settings.md +++ b/content/zh/docs/devops-user-guide/how-to-use/pipeline-settings.md @@ -1,7 +1,7 @@ --- title: "流水线设置" keywords: 'KubeSphere, Kubernetes, Docker, Jenkins, 流水线' -description: '了解 DevOps 工程中流水线的各个属性。' +description: '了解 DevOps 项目中流水线的各个属性。' linkTitle: "流水线设置" weight: 11280 --- @@ -10,169 +10,141 @@ weight: 11280 ## 准备工作 -- 您需要创建一个企业空间、一个 DevOps 工程以及一个帐户 (`project-regular`),必须邀请该帐户至该 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个 DevOps 项目以及一个用户 (`project-regular`),必须邀请该用户至该 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -## 流水线设置 - -### 基本信息 - -![basic-info-tab1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/basic-info-tab1.png) +## 基本信息 在**基本信息**选项卡,您可以自定义以下信息: -- **名称**:流水线的名称,同一个 DevOps 工程内的流水线不能重名。 +- **名称**:流水线的名称,同一个 DevOps 项目内的流水线不能重名。 -- **项目**:项目将根据工程资源进行分组,可以按工程对资源进行查看管理。 +- **DevOps 项目**:流水线所属的 DevOps 项目。 - **描述信息**:描述流水线的附加信息,描述信息不超过 256 个字符。 -- **代码仓库(选填)**:您可以选择一个代码仓库作为流水线的代码源。在 KubeSphere v3.1 中,您可以选择 GitHub、GitLab、Bitbucket、Git 以及 SVN 作为代码源。 +- **代码仓库(可选)**:您可以选择一个代码仓库作为流水线的代码源。您可以选择 GitHub、GitLab、Bitbucket、Git 以及 SVN 作为代码源。 {{< tabs >}} {{< tab "GitHub" >}} - ![code-source-github1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-github1.png) - - 如果选择 **GitHub**,则必须指定用于访问 GitHub 的令牌 (Token)。如果您已预先使用您的 GitHub 令牌创建了凭证,则可以从下拉菜单中选择已有凭证,或者点击**新建凭证**来创建新凭证。选择令牌后,点击**确认**,即可在右侧查看您的仓库。完成所有操作后,记得点击 **√** 图标。 + 如果选择 **GitHub**,则必须指定用于访问 GitHub 的凭证。如果您已预先使用您的 GitHub 
令牌创建了凭证,则可以从下拉菜单中选择已有凭证,或者点击**创建凭证**来创建新凭证。选择凭证后,点击**确定**,即可在右侧查看您的仓库。完成所有操作后,请点击 **√** 图标。 {{}} {{< tab "GitLab" >}} - ![code-source-gitlab1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-gitlab1.png) - - 如果选择 **GitLab**,则必须指定 GitLab 服务、项目所属组和仓库名称。如果获取仓库代码需要凭证,则需要指定一个凭证。完成所有操作后,记得点击 **√** 图标。 + 如果选择 **GitLab**,则必须指定 GitLab 服务器地址、项目组/所有者和代码仓库。如果访问代码仓库需要凭证,则需要指定一个凭证。完成所有操作后,请点击 **√** 图标。 {{}} {{< tab "Bitbucket" >}} - ![code-source-bitbucket1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-bitbucket1.png) - - 如果选择 **Bitbucket**,则需要输入您的 Bitbucket Server。您可以预先使用您的 Bitbucket 用户名和密码创建一个凭证,或者点击**创建凭证**来创建一个新凭证。输入信息后点击**确认**,即可在右侧看到您的仓库。完成所有操作后,记得点击 **√** 图标。 + 如果选择 **Bitbucket**,则需要输入您的 Bitbucket 服务器地址。您可以预先使用您的 Bitbucket 用户名和密码创建一个凭证,或者点击**创建凭证**来创建一个新凭证。输入信息后点击**确定**,即可在右侧看到您的仓库。完成所有操作后,请点击 **√** 图标。 {{}} {{< tab "Git" >}} - ![code-source-git1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-git1.png) - - 如果选择 **Git**,则需要指定仓库 URL。如果获取仓库代码需要凭证,则需要指定一个凭证。您也可以点击**新建凭证**来添加新凭证。完成所有操作后,记得点击 **√** 图标。 + 如果选择 **Git**,则需要指定仓库 URL。如果访问代码仓库需要凭证,则需要指定一个凭证。您也可以点击**创建凭证**来添加新凭证。完成所有操作后,请点击 **√** 图标。 {{}} {{< tab "SVN" >}} - ![code-source-svn1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-svn1.png) - - 如果选择 **SVN**,则需要指定远程仓库地址和凭证。您也可以按需指定包括分支和排除分支。完成所有操作后,记得点击 **√** 图标。 + 如果选择 **SVN**,则需要指定仓库地址和凭证。您也可以按需指定包括分支和排除分支。完成所有操作后,请点击 **√** 图标。 {{}} {{}} -### 选择代码仓库后进行高级设置 +## 指定代码仓库时的高级设置 -如果您已选择一个代码仓库,则可以在**高级设置**选项卡上自定义以下配置: +如果您指定一个代码仓库,则可以在**高级设置**选项卡上自定义以下配置: -**分支设置** +### 分支设置 -![branch-settings1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/branch-settings1.png) +**删除旧分支**:自动删除旧分支。分支记录将一起被删除。分支记录包括控制台输出、存档制品以及与特定分支相关的其他元数据。保留较少的分支可以节省 Jenkins 所使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧的分支: -**丢弃旧的分支**意味着分支记录将一起被丢弃。分支记录包括控制台输出、存档制品以及与特定分支相关的其他元数据。保留较少的分支可以节省 Jenkins 所使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧的分支: +- **分支保留天数(天)**:超过保留期限的分支将被删除。 -- 
**保留分支的天数**:如果分支达到保留的天数,将进行删除。 - -- **保留分支的最大个数**:如果分支达到保留的个数,将删除最旧的分支。 +- **分支最大数量**:如果分支数量超过最大数量,将删除最旧的分支。 {{< notice note >}} - **保留分支的天数**和**保留分支的最大个数**同时适用于分支。只要分支满足任一字段的条件,则将被丢弃。例如,如果将保留分支的天数指定为 2,将保留分支的最大个数指定为 3,那么超过任一数目的分支将被丢弃。KubeSphere 默认用 -1 预先填充这两个字段,这意味着删除的分支将被丢弃。 + **分支保留天数(天)**和**分支最大数量**同时适用于分支。只要分支满足任一字段的条件,则将被丢弃。例如,如果将保留分支的天数指定为 2,将保留分支的最大个数指定为 3,那么超过任一数目的分支将被丢弃。KubeSphere 默认用 7 和 5 预先填充这两个字段。 {{}} -**行为策略** +### 策略设置 -![behavioral-strategy1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/behavioral-strategy1.png) - -在**行为策略**中,KubeSphere 默认提供四种策略。Jenkins 流水线运行时,开发者提交的 PR (Pull Request) 也将被视为单独的分支。 +在**策略设置**中,KubeSphere 默认提供四种策略。Jenkins 流水线运行时,开发者提交的 PR (Pull Request) 也将被视为单独的分支。 **发现分支** -- **排除也作为 PR 提交的分支**:不像扫描原仓库 master 分支那样扫描源分支,这些分支需要合并。 -- **只有被提交为 PR 的分支**:只扫描 PR 分支。 -- **所有分支**:从原仓库中拉取所有分支。 +- **排除已提交 PR 的分支**:已提交 PR 的分支将被排除。 +- **只包括已提交 PR 的分支**:只拉取已提交 PR 的分支。 +- **包括所有分支**:从仓库中拉取所有分支。 -**发现 Tag 分支** +**发现标签** -- **启用发现 Tag 分支**:拥有指定标签 (Tag) 的分支将会被扫描。 -- **停用发现 Tag 分支**:拥有指定标签的分支将不会被扫描。 +- **开启标签发现**:拥有指定标签的分支将被扫描。 +- **关闭标签发现**:拥有指定标签的分支不会被扫描。 -**从原仓库中发现 PR** +**从原仓库发现 PR** -- **PR 与目标分支合并后的源代码版本**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 -- **PR 本身的源代码版本**:根据 PR 本身的源代码创建并运行流水线。 +- **拉取 PR 合并后的代码**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 +- **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 - **当 PR 被发现时会创建两个流水线**:KubeSphere 会创建两个流水线,一个流水线使用 PR 本身的源代码版本,一个流水线使用 PR 与目标分支合并后的源代码版本。 **从 Fork 仓库中发现 PR** -- **PR 与目标分支合并后的源代码版本**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 -- **PR 本身的源代码版本**:根据 PR 本身的源代码创建并运行流水线。 +- **拉取 PR 合并后的代码**:PR 合并到目标分支后,将基于源代码创建并运行流水线。 +- **拉取 PR 提交时的代码**:根据 PR 本身的源代码创建并运行流水线。 - **当 PR 被发现时会创建两个流水线**:KubeSphere 会创建两个流水线,一个流水线使用 PR 本身的源代码版本,一个流水线使用 PR 与目标分支合并后的源代码版本。 - **贡献者**:对 PR 做出贡献的用户。 - **所有人**:每个可以访问 PR 的用户。 -- **管理员或有编辑权限的用户**:仅限于对 PR 具有管理员或编辑权限的用户。 +- **具有管理员或有编辑权限的用户**:仅限于对 PR 具有管理员或编辑权限的用户。 - **无**:如果选择此选项,那么无论在**拉取策略**中选择了哪个选项,都不会发现 PR。 -**脚本路径** +### 正则过滤 
-![script-path1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/script-path1.png) +勾选选框以指定正则表达式来过滤分支、PR 和标签。 -**脚本路径**字段指定代码仓库中的 Jenkinsfile 路径,它指代仓库的根目录。如果文件位置发生更改,则脚本路径也需要更改。 +### 脚本路径 -**扫描 Repo Trigger** +**脚本路径**参数指定代码仓库中的 Jenkinsfile 路径,它指代仓库的根目录。如果文件位置发生更改,则脚本路径也需要更改。 -![scan-repo-trigger1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/scan-repo-trigger1.png) +### 扫描触发器 -您可以勾选**启用正则表达式,将忽略与提供的正则表达式不匹配的名称(包括分支与 PR 等)**,指定一个正则表达式作为扫描分支的触发器。 +勾选**定时扫描**,并从下拉列表中设置扫描时间间隔。 -您也可以勾选**如果没有扫描出发,则定期扫描**,并从下拉列表中设置扫描时间间隔。 +### 构建触发器 -**构建触发器** +您可以从**创建流水线时触发**和**删除流水线时触发**的下拉列表中选择一个流水线,以便在创建新的流水线或删除流水线时自动触发指定流水线中的任务。 -![build-trigger1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger1.png) - -您可以从**当创建流水线**和**当删除流水线**的下拉列表中选择一个流水线,以便在创建新的流水线或删除流水线时自动触发指定流水线中的任务。 - -**Git 克隆参数** - -![git-clone-options1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/git-clone-options1.png) +### 克隆设置 - **克隆深度**:克隆时需要提取的 commit 数量。 -- **流水线 clone 超时时间(单位:分钟)**:完成克隆过程所需要的时长(以分钟为单位)。 -- **是否开启浅克隆**:如果您开启浅克隆,则克隆的代码不会包含标签。 +- **克隆超时时间(min)**:完成克隆过程所需要的时长(以分钟为单位)。 +- **开启浅克隆**:如果您开启浅克隆,则克隆的代码不会包含标签。 -**Webhook 推送** +### Webhook -![webhook-push1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/webhook-push1.png) +**Webhook** 能有效地让流水线发现远程代码仓库中的更改,并自动触发新一轮运行。Webhook 应成为触发 Jenkins 自动扫描 GitHub 和 Git(例如 GitLab)的主要方法。 -**Webhook 推送**能有效地让流水线发现远程代码仓库中的更改,并自动触发新一轮运行。Webhook 应成为触发 Jenkins 自动扫描 GitHub 和 Git(例如 GitLab)的主要方法。 +## 不指定代码仓库时的高级设置 -### 不选择代码仓库后进行高级设置 +如果不指定代码仓库,则可以在**高级设置**选项卡上自定义以下配置: -如果不选择代码仓库,则可以在**高级设置**选项卡上自定义以下配置: +### 构建设置 -**构建设置** +**删除过期构建记录**:确定何时删除分支下的构建记录。构建记录包括控制台输出、存档制品以及与特定构建相关的其他元数据。保留较少的构建可以节省 Jenkins 所使用的磁盘空间。KubeSphere 提供两个选项来确定应何时删除旧的构建: -![build-settings1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-settings1.png) +- **构建记录保留期限(天)**:超过保留期限的构建记录将被删除。 -**丢弃旧的构建**确定何时应丢弃分支下的构建记录。构建记录包括控制台输出、存档制品以及与特定构建相关的其他元数据。保留较少的构建可以节省 Jenkins 
所使用的磁盘空间。KubeSphere 提供两个选项来确定应何时丢弃旧的构建: - -- **保留构建的天数**:如果构建达到保留的天数,将进行删除。 - -- **保留构建的最大个数**:如果构建超过一定的数量,将丢弃最旧的构建。 +- **构建记录最大数量**:当构建记录数量超过允许的最大数量,最早的构建记录将被删除。 {{< notice note >}} @@ -182,18 +154,13 @@ weight: 11280 - **不允许并发构建**:如果勾选此选项,则不能并发运行多个构建。 -**参数化构建** +### 构建参数 -![parametric-build1](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/parametric-build1.png) +参数化的构建过程允许您在开始运行流水线时传入一个或多个参数。KubeSphere 默认提供五种参数类型,包括**字符串**、**多行字符串**、**布尔值**、**选项** 以及**密码**。当参数化项目时,构建会被替换为参数化构建,其中将提示用户为每个定义的参数输入值。 -参数化的构建过程允许您在开始运行流水线时传入一个或多个参数。KubeSphere 默认提供五种参数类型,包括**字符串参数 (String)**、**文本 (Text)**、**布尔值 (Boolean)**、**选项参数 (Choice)** 以及**密码参数 (Password)**。当参数化项目时,构建会被替换为参数化构建,其中将提示用户为每个定义的参数输入值。 +### 构建触发器 -**构建触发器** - -![build-trigger--2](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger--2.png) - -- **定时构建**:允许定期执行构建。您可以点击 **CRON** 来参照详细的 cron 语法。 -- **触发远程构建(例如,使用脚本)**:如果您需要访问预定义的 URL 来远程触发构建,则必须勾选该选项并提供身份验证令牌,这样只有拥有令牌的用户才能远程触发构建。 +**定时构建**:允许定期执行构建。点击**了解更多**来参照详细的 CRON 语法。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/pipeline-webhook.md b/content/zh/docs/devops-user-guide/how-to-use/pipeline-webhook.md index 84503dedc..7287a86cb 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/pipeline-webhook.md +++ b/content/zh/docs/devops-user-guide/how-to-use/pipeline-webhook.md @@ -13,78 +13,54 @@ weight: 11293 ## 准备工作 - [启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -- 创建一个企业空间、一个 DevOps工程和一个帐户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 创建一个企业空间、一个 DevOps 项目和一个用户(例如,`project-regular`)。`project-regular` 需要被邀请至 DevOps 项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 通过远程代码仓库创建一个基于 Jenkinsfile 的流水线。有关更多信息,请参见[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)。 ## 配置 Webhook ### 获取 webhook URL -1. 
使用 `project-regular` 帐户登录 Kubesphere Web 控制台。转到 DevOps 工程,点击流水线(例如,`jenkins-in-scm`)以查看详情页面。 +1. 使用 `project-regular` 帐户登录 Kubesphere Web 控制台。转到 DevOps 项目,点击流水线(例如,`jenkins-in-scm`)以查看详情页面。 -2. 点击**更多**,在下拉菜单中选择**编辑配置**。 +2. 点击**更多**,在下拉菜单中选择**编辑设置**。 - ![edit-config](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-config.png) - -3. 在出现的会话框中,滑动至 **Webhook 推送** 以获得 webhook push URL。 - - ![webhook-push](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-push.png) +3. 在出现的会话框中,滑动至 **Webhook** 以获得 webhook push URL。 ### 在 GitHub 仓库中设置 webhook -1. 登录您的 GitHub,并转到 `devops-java-sample` 仓库。 +1. 登录您的 GitHub,并转到 `devops-maven-sample` 仓库。 2. 点击 **Settings**,然后点击 **Webhooks**,然后点击 **Add webhook**。 - ![click-add-webhook](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-add-webhook.png) - 3. 在 **Payload URL** 中输入流水线中的 webhook push URL,然后点击 **Add webhook**。出于演示需要,本教程选择 **Just the push event**。您可以根据需要进行配置。有关更多信息,请参见 [GitHub 文档](https://docs.github.com/en/developers/webhooks-and-events/webhooks/creating-webhooks)。 - ![add-webhook](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/add-webhook.png) - 4. 配置好的 webhook 会展示在 **Webhooks** 页面。 - ![webhook-ready](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-ready.png) - ## 使用 Webhook 触发流水线 ### 提交拉取请求到仓库 -1. 在您仓库的 **Code** 页面,点击 **master** 然后选择 **sonarqube**。 - - ![click-sonar](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-sonar.png) +1. 在您仓库的 **Code** 页面,点击 **master** 然后选择 **sonarqube** 分支。 2. 转到 `/deploy/dev-ol` 然后点击文件 `devops-sample.yaml`。 - ![click-file](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-file.png) - 3. 点击 以编辑文件。 例如,将 `spec.replicas` 的值改变为 `3`。 - ![edit-file](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-file.png) - 4. 在页面底部点击 **Commit changes**。 ### 检查 webhook 交付 1. 
在您仓库的 **Webhooks** 页面,点击 webhook。 - ![webhook-ready](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-ready.png) - 2. 点击 **Recent Deliveries**,然后点击一个具体交付记录查看详情。 - ![delivery-detail](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/delivery-detail.png) - ### 检查流水线 -1. 使用 `project-regular` 帐户登录 Kubesphere Web 控制台。转到 DevOps 工程,点击流水线。 +1. 使用 `project-regular` 帐户登录 Kubesphere Web 控制台。转到 DevOps 项目,点击流水线。 -2. 在**活动**选项卡,检查提交到远程仓库 `sonarqube` 分支的拉取请求是否触发了新的运行。 - - ![pipeline-triggered](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pipeline-triggered.png) +2. 在**运行记录**选项卡,检查提交到远程仓库 `sonarqube` 分支的拉取请求是否触发了新的运行。 3. 转到 `kubesphere-sample-dev` 项目的 **Pods** 页面,检查 3 个 Pods 的状态。如果 3 个 Pods 为运行状态,表示流水线运行正常。 - ![pods](/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pods.png) - diff --git a/content/zh/docs/devops-user-guide/how-to-use/set-ci-node.md b/content/zh/docs/devops-user-guide/how-to-use/set-ci-node.md index 4b1823cc1..8fbead71f 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/set-ci-node.md +++ b/content/zh/docs/devops-user-guide/how-to-use/set-ci-node.md @@ -12,29 +12,23 @@ weight: 11270 ## 准备工作 -您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建一个具有该权限的新角色并将该新角色其分配给一个帐户。 +您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建一个具有该权限的新角色并将该新角色其分配给一个用户。 ## 标记 CI 节点 1. 点击左上角的**平台管理**,然后选择**集群管理**。 -2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入 Member 集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 +2. 如果您已经启用[多集群功能](../../../multicluster-management/)并已导入成员集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。 -3. 转到**节点管理**下的**集群节点**,您可以在其中查看当前集群中的现有节点。 +3. 转到**节点**下的**集群节点**,您可以在其中查看当前集群中的现有节点。 - ![节点管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/node-management.png) +4. 从列表中选择一个节点用来运行 CI 任务。点击节点名称以转到其详情页面。点击**更多操作**,然后选择**编辑标签**。 -4. 
从列表中选择一个节点用来运行 CI 任务。例如,在此处选择 `node02`,点击它以转到其详情页面。点击**更多操作**,然后选择**编辑标签**。 - - ![选择 CI 节点](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-ci-node.png) - -5. 在弹出对话框中,您可以看到一个标签的键是 `node-role.kubernetes.io/worker`。输入 `ci` 作为此标签的值,然后点击**保存**。 - - ![添加 CI 标签](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-ci-label.png) +5. 在弹出的对话框中,您可以看到一个标签的键是 `node-role.kubernetes.io/worker`。输入 `ci` 作为此标签的值,然后点击**保存**。 {{< notice note >}} - 您也可以点击**添加标签**来按需添加新标签。 + 您也可以点击**添加**来按需添加新标签。 {{}} @@ -42,18 +36,12 @@ weight: 11270 流水线和 S2I/B2I 工作流基本上会根据[节点亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)调度到该节点。如果要将节点专用于 CI 任务,即不允许将其他工作负载调度到该节点,您可以在该节点上添加[污点](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。 -1. 点击**更多操作**,然后选择**污点管理**。 +1. 点击**更多操作**,然后选择**编辑污点**。 - ![选择污点管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-taint-management.png) - -2. 点击**添加污点**,然后输入键 `node.kubernetes.io/ci` 而不指定值。您可以根据需要选择 `不允许调度 (NoSchedule)` 或 `尽量不调度 (PreferNoSchedule)` 。 - - ![添加污点](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-taint.png) +2. 点击**添加污点**,然后输入键 `node.kubernetes.io/ci` 而不指定值。您可以根据需要选择 `阻止调度`、`尽可能阻止调度`或`阻止调度并驱逐现有容器组` 。 3. 
点击**保存**。KubeSphere 将根据您设置的污点调度任务。您现在可以回到 DevOps 流水线上进行操作。 - ![污点已添加](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/taint-result.png) - {{< notice tip >}} 本教程还涉及与节点管理有关的操作。有关详细信息,请参见[节点管理](../../../cluster-administration/nodes/)。 diff --git a/content/zh/docs/devops-user-guide/how-to-use/use-pipeline-templates.md b/content/zh/docs/devops-user-guide/how-to-use/use-pipeline-templates.md index 4d49fbed0..d342159fd 100644 --- a/content/zh/docs/devops-user-guide/how-to-use/use-pipeline-templates.md +++ b/content/zh/docs/devops-user-guide/how-to-use/use-pipeline-templates.md @@ -6,9 +6,9 @@ linkTitle: "使用流水线模板" weight: 11290 --- -KubeSphere 提供图形编辑面板,您可以通过交互式操作定义 Jenkins 流水线的阶段和步骤。KubeSphere v3.1 中提供两个内置流水线模板,作为持续集成 (CI) 和持续交付 (CD) 的框架。 +KubeSphere 提供图形编辑面板,您可以通过交互式操作定义 Jenkins 流水线的阶段和步骤。KubeSphere 3.2.1 中提供两个内置流水线模板,作为持续集成 (CI) 和持续交付 (CD) 的框架。 -在 KubeSphere 的 DevOps 工程中创建了流水线后,您可以点击该流水线查看其详情,然后点击**编辑流水线**按需选择一个流水线模板。本文档对这两个流水线模板的概念进行阐述。 +在 KubeSphere 的 DevOps 项目中创建了流水线后,您可以点击该流水线查看其详情,然后点击**编辑流水线**按需选择一个流水线模板。本文档对这两个流水线模板的概念进行阐述。 ## CI 流水线模板 diff --git a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/_index.md b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/_index.md index 72a253e2b..23d06d1f7 100644 --- a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/_index.md +++ b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/_index.md @@ -1,5 +1,5 @@ --- -linkTitle: "理解并管理 DevOps 工程" +linkTitle: "理解并管理 DevOps 项目" weight: 11100 _build: diff --git a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md index eb032490f..844525f06 100644 --- a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md +++ 
b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management.md @@ -1,42 +1,34 @@ --- -title: "DevOps 工程管理" +title: "DevOps 项目管理" keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins' -description: '创建并管理 DevOps 工程,了解 DevOps 工程中的各项基本元素。' -linkTitle: "DevOps 工程管理" +description: '创建并管理 DevOps 项目,了解 DevOps 项目中的各项基本元素。' +linkTitle: "DevOps 项目管理" weight: 11120 --- -本教程演示如何创建和管理 DevOps 工程。 +本教程演示如何创建和管理 DevOps 项目。 ## 准备工作 -- 您需要创建一个企业空间和一个帐户 (`project-admin`),必须邀请该帐户至该企业空间并赋予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间和一个用户 (`project-admin`),必须邀请该用户至该企业空间并赋予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 -## 创建 DevOps 工程 +## 创建 DevOps 项目 -1. 以 `project-admin` 身份登录 KubeSphere 控制台,转到 **DevOps 工程**,然后点击**创建**。 +1. 以 `project-admin` 身份登录 KubeSphere 控制台,转到 **DevOps 项目**,然后点击**创建**。 - ![创建 DevOps 工程](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create-1.png) +2. 输入 DevOps 项目的基本信息,然后点击**确定**。 -2. 输入 DevOps 工程的基本信息,然后点击**确定**。 + - **名称**:此 DevOps 项目的简明名称,便于用户识别,例如 `demo-devops`。 + - **别名**:此 DevOps 项目的别名。 + - **描述信息**:此 DevOps 项目的简要介绍。 + - **集群设置**:在当前版本中,DevOps 项目无法同时跨多个集群运行。如果您已启用[多集群功能](../../../multicluster-management/),则必须选择一个集群来运行 DevOps 项目。 - ![输入基本信息](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops-2.png) +3. DevOps 项目创建后,会显示在下图所示的列表中。 - - **名称**:此 DevOps 工程的简明名称,便于用户识别,例如 `demo-devops`。 - - **别名**:此 DevOps 工程的别名。 - - **描述信息**:此 DevOps 工程的简要介绍。 - - **集群设置**:在当前版本中,DevOps 工程无法同时跨多个集群运行。如果您已启用[多集群功能](../../../multicluster-management/),则必须选择一个集群来运行 DevOps 工程。 +## 查看 DevOps 项目 -3. 
DevOps 工程创建后,会显示在下图所示的列表中。 - - ![DevOps 列表](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list-3.png) - -## 查看 DevOps 工程 - -点击刚刚创建的 DevOps 工程,转到其详情页面。具有不同权限的租户可以在 DevOps 工程中执行各种任务,包括创建 CI/CD 流水线和凭证以及管理帐户和角色。 - -![DevOps 详情页面](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page-4.png) +点击刚刚创建的 DevOps 项目,转到其详情页面。具有不同权限的租户可以在 DevOps 项目中执行各种任务,包括创建 CI/CD 流水线和凭证以及管理帐户和角色。 ### 流水线 @@ -44,16 +36,15 @@ weight: 11120 ### 凭证 -具有所需权限的 DevOps 工程用户可以为流水线配置凭证,以便与外部环境进行交互。用户在 DevOps 工程中添加凭证后,DevOps 工程就可以使用这些凭证与第三方应用程序(例如 GitHub、GitLab 和 Docker Hub)进行交互。有关更多信息,请参见[凭证管理](../../how-to-use/credential-management/)。 +具有所需权限的 DevOps 项目用户可以为流水线配置凭证,以便与外部环境进行交互。用户在 DevOps 项目中添加凭证后,DevOps 项目就可以使用这些凭证与第三方应用程序(例如 GitHub、GitLab 和 Docker Hub)进行交互。有关更多信息,请参见[凭证管理](../../how-to-use/credential-management/)。 ### 成员和角色 -与项目相似,DevOps 工程也需要为用户授予不同的角色,然后用户才能在 DevOps 工程中工作。工程管理员(例如 `project-admin`)负责邀请租户并授予他们不同的角色。有关更多信息,请参见[角色和成员管理](../role-and-member-management/)。 +与项目相似,DevOps 项目也需要为用户授予不同的角色,然后用户才能在 DevOps 项目中工作。项目管理员(例如 `project-admin`)负责邀请租户并授予他们不同的角色。有关更多信息,请参见[角色和成员管理](../role-and-member-management/)。 -## 编辑或删除 DevOps 工程 +## 编辑或删除 DevOps 项目 -1. 点击**工程管理**下的**基本信息**,您可以查看当前 DevOps 工程的概述,包括工程角色和工程成员的数量、工程名称和工程创建者。 +1. 点击 **DevOps 项目设置**下的**基本信息**,您可以查看当前 DevOps 项目的概述,包括项目角色和项目成员的数量、项目名称和项目创建者。 -2. 点击右侧的**工程管理**,您可以编辑此 DevOps 工程的基本信息或删除 DevOps 工程。 +2. 
点击右侧的 **DevOps 管理**,您可以编辑此 DevOps 项目的基本信息或删除 DevOps 项目。 - ![工程基本信息](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info-5.png) diff --git a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md index ad223452a..12219d51e 100644 --- a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md +++ b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/overview.md @@ -8,36 +8,32 @@ weight: 11110 DevOps 是一系列做法和工具,可以使 IT 和软件开发团队之间的流程实现自动化。其中,随着敏捷软件开发日趋流行,持续集成 (CI) 和持续交付 (CD) 已经成为该领域一个理想的解决方案。在 CI/CD 工作流中,每次集成都通过自动化构建来验证,包括编码、发布和测试,从而帮助开发者提前发现集成错误,团队也可以快速、安全、可靠地将内部软件交付到生产环境。 -不过,传统的 Jenkins Master-Agent 架构(即多个 Agent 为一个 Master 工作)有以下不足。 +不过,传统的 Jenkins Controller-Agent 架构(即多个 Agent 为一个 Controller 工作)有以下不足。 -- 如果 Master 宕机,整个 CI/CD 流水线会崩溃。 +- 如果 Controller 宕机,整个 CI/CD 流水线会崩溃。 - 资源分配不均衡,一些 Agent 的流水线任务 (Job) 出现排队等待,而其他 Agent 处于空闲状态。 - 不同的 Agent 可能配置环境不同,并需要使用不同的编码语言。这种差异会给管理和维护带来不便。 ## 了解 KubeSphere DevOps -KubeSphere DevOps 工程支持源代码管理工具,例如 GitHub、Git 和 SVN。用户可以通过图形编辑面板 (Jenkinsfile out of SCM) 构建 CI/CD 流水线,或者从代码仓库 (Jenkinsfile in SCM) 创建基于 Jenkinsfile 的流水线。 +KubeSphere DevOps 项目支持源代码管理工具,例如 GitHub、Git 和 SVN。用户可以通过图形编辑面板 (Jenkinsfile out of SCM) 构建 CI/CD 流水线,或者从代码仓库 (Jenkinsfile in SCM) 创建基于 Jenkinsfile 的流水线。 ### 功能 KubeSphere DevOps 系统为您提供以下功能: -- 独立的 DevOps 工程,提供访问可控的 CI/CD 流水线。 +- 独立的 DevOps 项目,提供访问可控的 CI/CD 流水线。 - 开箱即用的 DevOps 功能,无需复杂的 Jenkins 配置。 - 支持 [Source-to-image (S2I)](../../../project-user-guide/image-builder/source-to-image/) 和 [Binary-to-image (B2I)](../../../project-user-guide/image-builder/binary-to-image/),快速交付镜像。 - [基于 Jenkinsfile 的流水线](../../../devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile/),提供一致的用户体验,支持多个代码仓库。 - 
[图形编辑面板](../../../devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel/),用于创建流水线,学习成本低。 - 强大的工具集成机制,例如 [SonarQube](../../../devops-user-guide/how-to-integrate/sonarqube/),用于代码质量检查。 -![流水线列表](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list-1.png) - -![SonarQube 详细结果](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.PNG) - ### KubeSphere CI/CD 流水线工作流 -KubeSphere CI/CD 流水线基于底层 Kubernetes Jenkins Agent 而运行。这些 Jenkins Agent 可以动态扩缩,即根据任务状态进行动态供应或释放。Jenkins Master 和 Agent 以 Pod 的形式运行在 KubeSphere 节点上。Master 运行在其中一个节点上,其配置数据存储在一个存储卷 (Volume) 中。Agent 运行在各个节点上,但可能不会一直处于运行状态,而是根据需求动态创建并自动删除。 +KubeSphere CI/CD 流水线基于底层 Kubernetes Jenkins Agent 而运行。这些 Jenkins Agent 可以动态扩缩,即根据任务状态进行动态供应或释放。Jenkins Controller 和 Agent 以 Pod 的形式运行在 KubeSphere 节点上。Controller 运行在其中一个节点上,其配置数据存储在一个存储卷 (Volume) 中。Agent 运行在各个节点上,但可能不会一直处于运行状态,而是根据需求动态创建并自动删除。 -当 Jenkins Master 收到构建请求,会根据标签动态创建运行在 Pod 中的 Jenkins Agent 并注册到 Master 上。当 Agent 运行完任务后,将会被释放,相关的 Pod 也会被删除。 +当 Jenkins Controller 收到构建请求,会根据标签动态创建运行在 Pod 中的 Jenkins Agent 并注册到 Controller 上。当 Agent 运行完任务后,将会被释放,相关的 Pod 也会被删除。 ### 动态供应 Jenkins Agent @@ -47,4 +43,4 @@ KubeSphere CI/CD 流水线基于底层 Kubernetes Jenkins Agent 而运行。这 **高可扩缩性**:当 KubeSphere 集群因资源不足而导致任务长时间排队等待时,您可以向集群新增节点。 -**高可用性**:当 Jenkins Master 故障时,KubeSphere 会自动创建一个新的 Jenkins Master 容器,并将存储卷挂载至新创建的容器,保证数据不会丢失,从而实现集群高可用。 \ No newline at end of file +**高可用性**:当 Jenkins Controller 故障时,KubeSphere 会自动创建一个新的 Jenkins Controller 容器,并将存储卷挂载至新创建的容器,保证数据不会丢失,从而实现集群高可用。 \ No newline at end of file diff --git a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md index 1408afe41..7c9cc7c50 100644 --- a/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md +++ 
b/content/zh/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management.md @@ -1,61 +1,55 @@ --- title: "角色和成员管理" keywords: 'Kubernetes, KubeSphere, DevOps, 角色, 成员' -description: '在 DevOps 工程中创建并管理各种角色和成员。' +description: '在 DevOps 项目中创建并管理各种角色和成员。' linkTitle: "角色和成员管理" weight: 11130 --- -本教程演示如何在 DevOps 工程中管理角色和成员。 +本教程演示如何在 DevOps 项目中管理角色和成员。 -在 DevOps 工程范围内,您可以向角色授予以下资源的权限: +在 DevOps 项目范围内,您可以向角色授予以下资源的权限: - 流水线 - 凭证 -- DevOps 工程设置 +- DevOps 项目设置 - 访问控制 ## 准备工作 -至少已创建一个 DevOps 工程,例如 `demo-devops`。此外,您需要一个在 DevOps 工程级别具有 `admin` 角色的帐户(例如 `devops-admin`)。 +至少已创建一个 DevOps 项目,例如 `demo-devops`。此外,您需要一个在 DevOps 项目级别具有 `admin` 角色的用户(例如 `devops-admin`)。 ## 内置角色 -在**工程角色**中,有三个可用的内置角色,如下所示。创建 DevOps 工程时,KubeSphere 会自动创建内置角色,并且无法编辑或删除这些角色。 +在 **DevOps 项目角色**中,有三个可用的内置角色,如下所示。创建 DevOps 项目时,KubeSphere 会自动创建内置角色,并且无法编辑或删除这些角色。 | 内置角色 | 描述信息 | | ------------------ | ------------------------------------------------------------ | -| viewer | DevOps 工程观察者,可以查看 DevOps 工程下所有的资源。 | -| operator | DevOps 工程普通成员,可以在 DevOps 工程下创建流水线凭证等。 | -| admin | DevOps 工程管理员,可以管理 DevOps 工程下所有的资源。 | +| viewer | DevOps 项目观察者,可以查看 DevOps 项目下所有的资源。 | +| operator | DevOps 项目普通成员,可以在 DevOps 项目下创建流水线凭证等。 | +| admin | DevOps 项目管理员,可以管理 DevOps 项目下所有的资源。 | -## 创建 DevOps 工程角色 +## 创建 DevOps 项目角色 -1. 以 `devops-admin` 身份登录控制台,然后前往 **DevOps 工程**页面选择一个 DevOps 工程(例如 `demo-devops`)。 +1. 以 `devops-admin` 身份登录控制台,然后前往 **DevOps 项目**页面选择一个 DevOps 项目(例如 `demo-devops`)。 {{< notice note >}} - 本教程使用 `devops-admin` 帐户作为示例。只要您使用的帐户被授予的角色包含 DevOps 工程级别**访问控制**中的**成员查看**、**角色管理**和**角色查看**的权限,此帐户便可以创建 DevOps 工程角色。 + 本教程使用 `devops-admin` 帐户作为示例。只要您使用的帐户被授予的角色包含 DevOps 项目级别**访问控制**中的**成员查看**、**角色管理**和**角色查看**的权限,此帐户便可以创建 DevOps 项目角色。 {{}} -2. 转到**工程管理**中的**工程角色**,点击**创建**并设置**角色标识符**。在本示例中,将创建一个名为 `pipeline-creator` 的角色。点击**编辑权限**继续。 - - ![创建角色](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_1.png) +2. 
转到 **DevOps 项目设置**中的 **DevOps 项目角色**,点击**创建**并设置**名称**。在本示例中,将创建一个名为 `pipeline-creator` 的角色。点击**编辑权限**继续。 3. 在**流水线管理**中,选择您希望授予该角色的权限。例如,为此角色选择了**流水线管理**和**流水线查看**。点击**确定**完成操作。 - ![分配角色](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_2.png) - {{< notice note >}} **依赖于**表示首先需要选择主要权限(**依赖于**之后列出的),以便可以分配关联权限。 {{}} -4. 新创建的角色将列在**工程角色**中。您可以点击右侧的 对其进行编辑。 - - ![角色列表](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-list_3.png) +4. 新创建的角色将列在 **DevOps 项目角色**中。您可以点击右侧的 对其进行编辑。 {{< notice note >}} @@ -65,21 +59,18 @@ weight: 11130 ## 邀请新成员 -1. 在**工程管理**中选择**工程成员**,然后点击**邀请成员**。 +1. 在 **DevOps 项目设置**中选择 **DevOps 项目成员**,然后点击**邀请**。 -2. 点击 邀请帐户加入此 DevOps 工程,并向此帐户授予 `pipeline-creator` 角色。 - - ![邀请成员](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-invite-member_4.png) +2. 点击 邀请帐户加入此 DevOps 项目,并向此帐户授予 `pipeline-creator` 角色。 {{< notice note >}} - 必须先邀请用户加入 DevOps 工程所在的企业空间。 + 必须先邀请用户加入 DevOps 项目所在的企业空间。 {{}} -3. 点击**确定**将用户添加到此 DevOps 工程。在**工程成员**中,您可以看到列出了新邀请的成员。 +3. 点击**确定**将用户添加到此 DevOps 项目。在 **DevOps 项目成员**中,您可以看到列出了新邀请的成员。 -4. 您还可以通过编辑现有成员来更改其角色或将其从 DevOps 工程中删除。 +4. 
您还可以通过编辑现有成员来更改其角色或将其从 DevOps 项目中删除。 - ![编辑成员](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-user-edit_5.png) diff --git a/content/zh/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md b/content/zh/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md index 18ad126a8..3073a776a 100644 --- a/content/zh/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md +++ b/content/zh/docs/faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace.md @@ -12,9 +12,9 @@ Kubernetes 命名空间即 KubeSphere 项目。如果您不是在 KubeSphere 控 ## 准备工作 -- 您需要有一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个具有该权限的新角色并将其分配至一个帐户。 +- 您需要有一个具有**集群管理**权限的用户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个具有该权限的新角色并将其分配至一个用户。 -- 您需要有一个可用的企业空间,以便将命名空间分配至该企业空间。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要有一个可用的企业空间,以便将命名空间分配至该企业空间。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建 Kubernetes 命名空间 @@ -28,15 +28,11 @@ kubectl create ns demo-namespace ## 添加命名空间至 KubeSphere 企业空间 -1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面。点击**项目管理**,您可以查看在当前集群中运行的所有项目(即命名空间),包括前述刚刚创建的项目。 +1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面。点击**项目**,您可以查看在当前集群中运行的所有项目,包括前述刚刚创建的项目。 2. 通过 kubectl 创建的命名空间不属于任何企业空间。请点击右侧的 ,选择**分配企业空间**。 - ![分配企业空间](/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/assign-workspace.PNG) +3. 在弹出的对话框中,为该项目选择一个**企业空间**和**项目管理员**,然后点击**确定**。 -3. 在弹出的对话框中,为该项目选择一个**目标企业空间**和**项目管理员**,然后点击**确定**。 - -4. 转到您的企业空间,可以在**项目管理**页面看到该项目已显示。 - - ![项目管理页面](/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/project-page.PNG) +4. 
转到您的企业空间,可以在**项目**页面看到该项目已显示。 diff --git a/content/zh/docs/faq/access-control/cannot-login.md b/content/zh/docs/faq/access-control/cannot-login.md index eb73837d3..76e5625fe 100644 --- a/content/zh/docs/faq/access-control/cannot-login.md +++ b/content/zh/docs/faq/access-control/cannot-login.md @@ -1,14 +1,14 @@ --- -title: "帐户无法登录" -keywords: "无法登录, account is not active, KubeSphere, Kubernetes" +title: "用户无法登录" +keywords: "无法登录, 用户不活跃, KubeSphere, Kubernetes" description: "如何解决无法登录的问题" -linkTitle: "帐户无法登录" +linkTitle: "用户无法登录" Weight: 16440 --- -KubeSphere 安装时会自动创建默认帐户 (`admin/P@88w0rd`),密码错误或者帐户状态不是**活跃**会导致无法登录。 +KubeSphere 安装时会自动创建默认用户 (`admin/P@88w0rd`),密码错误或者用户状态不是**活跃**会导致无法登录。 -下面是帐户无法登录时,一些常见的问题: +下面是用户无法登录时,一些常见的问题: ## Account Not Active @@ -16,7 +16,7 @@ KubeSphere 安装时会自动创建默认帐户 (`admin/P@88w0rd`),密码错 ![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/account-not-active.png) -1. 执行以下命令检查帐户状态: +1. 执行以下命令检查用户状态: ```bash $ kubectl get users @@ -86,11 +86,11 @@ kubectl -n kubesphere-system get deploy ks-apiserver -o jsonpath='{.spec.templat kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spec.template.spec.containers[0].image}' ``` -## 帐户或密码错误 +## 用户名或密码错误 ![incorrect-password](/images/docs/faq/access-control-and-account-management/cannot-login/wrong-password.png) -通过以下命令检查帐户密码是否正确: +通过以下命令检查用户密码是否正确: ``` curl -u : "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes" @@ -141,23 +141,3 @@ E0909 07:05:22.770468 1 redis.go:51] unable to reach redis host EOF ``` kubectl -n kubesphere-system rollout restart deploy ks-console ``` - -## 升级到 v3.1.0 后无法通过第三方帐号登录 - -![forbidden](/images/docs/faq/access-control-and-account-management/cannot-login/forbidden.jpg) - -```js -{ - code: 403, - kind: 'Status', - apiVersion: 'v1', - metadata: {}, - status: 'Failure', - message: 'users.iam.kubesphere.io is forbidden: User "system:pre-registration" cannot 
create resource "users" in API group "iam.kubesphere.io" at the cluster scope', - reason: 'Forbidden', - details: { group: 'iam.kubesphere.io', kind: 'users' }, - statusText: 'Forbidden' -} -``` - -这是一个从 v3.0.0 升级到 v3.1.0 过程中存在的 Bug。相关和解决方式请参见 [https://github.com/kubesphere/kubesphere/issues/3850](https://github.com/kubesphere/kubesphere/issues/3850)。 \ No newline at end of file diff --git a/content/zh/docs/faq/access-control/forgot-password.md b/content/zh/docs/faq/access-control/forgot-password.md index b7f2af961..9d479f663 100644 --- a/content/zh/docs/faq/access-control/forgot-password.md +++ b/content/zh/docs/faq/access-control/forgot-password.md @@ -1,16 +1,22 @@ --- title: "重置帐户密码" keywords: "忘记, 密码, KubeSphere, Kubernetes" -description: "重置任意一个帐户的密码。" +description: "重置任意一个用户的密码。" linkTitle: "重置帐户密码" Weight: 16410 --- ## 重置普通用户密码 -具有用户管理权限的管理员可修改帐户密码。在**帐户管理**页面,点击需要修改密码的帐户。在帐户的详情页面,点击**更多操作**并选择**修改密码**。 +1. 使用具有用户管理权限的用户登录 KubeSphere Web 控制台。 -![modify-password](/images/docs/zh-cn/faq/forgot-password/modify-password.png) +2. 点击左上角的**平台管理**,选择**访问控制**。点击**用户**。 + +3. 在**用户**页面,点击需要修改密码的用户进入详情页。 + +4. 在用户的详情页,点击**更多操作**并选择**修改密码**。 + +5. 
在出现的对话框中,输入新的密码并重复输入新的密码。完成后点击**确定**。 ## 重置管理员密码 @@ -22,6 +28,6 @@ kubectl patch users -p '{"spec":{"password":""}}' --typ {{< notice note >}} -请将命令中的 `` 修改为实际的帐户名称,将 `` 修改为实际的新密码。 +请将命令中的 `` 修改为实际的用户名,将 `` 修改为实际的新密码。 {{}} \ No newline at end of file diff --git a/content/zh/docs/faq/applications/remove-built-in-apps.md b/content/zh/docs/faq/applications/remove-built-in-apps.md index a5d556bdf..e236c9b4c 100644 --- a/content/zh/docs/faq/applications/remove-built-in-apps.md +++ b/content/zh/docs/faq/applications/remove-built-in-apps.md @@ -6,36 +6,28 @@ linkTitle: "移除 KubeSphere 中的内置应用" Weight: 16910 --- -作为一个以应用为中心的开源容器平台,KubeSphere 在基于 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的应用商店中集成了 17 个内置应用。这些应用可供企业空间内的所有租户使用,但您也可以将这些应用从应用商店中移除。本教程为您演示怎样从应用商店中移除内置应用。 +作为一个以应用为中心的开源容器平台,KubeSphere 在基于 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的应用商店中集成了应用。这些应用可供企业空间内的所有租户使用,但您也可以将这些应用从应用商店中移除。本教程为您演示怎样从应用商店中移除内置应用。 ## 准备工作 -- 您需要在本教程中使用具有 `platform-admin` 角色的帐户(例如:`admin`)。 +- 您需要在本教程中使用具有 `platform-admin` 角色的用户(例如:`admin`)。 - 您需要[启用应用商店](../../../pluggable-components/app-store/)。 ## 移除内置应用 1. 以 `admin` 身份登录 Web 控制台,点击左上角**平台管理**,然后选择**应用商店管理**。 -2. 在**应用商店**页面,您可以看到列表中展示了 17 个内置应用。选择您想要从应用商店中移除的应用,例如,点击 **Tomcat** 跳转到其详情页面。 - - ![click-tomcat](/images/docs/zh-cn/faq/applications/remove-built-in-apps/click-tomcat.png) +2. 在**应用**页面,您可以看到列表中展示的应用。选择您想要从应用商店中移除的应用,例如,点击 **Tomcat** 跳转到其详情页面。 3. 在 Tomcat 的详情页面,点击**下架应用**以移除应用。 - ![suspend-tomcat](/images/docs/zh-cn/faq/applications/remove-built-in-apps/suspend-tomcat.png) - 4. 在出现的对话框中,点击**确定**以确认您的操作。 - ![confirm-suspend](/images/docs/zh-cn/faq/applications/remove-built-in-apps/confirm-suspend.png) - 5. 
若要让该应用在应用商店中再次可用,请点击**上架应用**,然后点击**确定**以确认您的操作。 - ![activate-tomcat](/images/docs/zh-cn/faq/applications/remove-built-in-apps/activate-tomcat.png) - {{< notice note >}} - 您也可以根据自己的需要,来创建包含必须角色的新帐户。有关更多在 KubeSphere 中管理应用的信息,请参考[应用程序生命周期管理](../../../application-store/app-lifecycle-management/)。 + 您也可以根据自己的需要,来创建包含必须角色的新用户。有关更多在 KubeSphere 中管理应用的信息,请参考[应用程序生命周期管理](../../../application-store/app-lifecycle-management/)。 {{}} diff --git a/content/zh/docs/faq/applications/reuse-the-same-app-name-after-deletion.md b/content/zh/docs/faq/applications/reuse-the-same-app-name-after-deletion.md deleted file mode 100644 index 1b018a549..000000000 --- a/content/zh/docs/faq/applications/reuse-the-same-app-name-after-deletion.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "删除应用后复用相同应用名称" -keywords: "KubeSphere, OpenPitrix, 应用程序, 应用" -description: "了解如何在删除应用后复用相同应用名称。" -linkTitle: "删除应用后复用相同应用名称" -Weight: 16920 ---- - -若要在 KubeSphere 中部署应用,租户可以进入应用商店,根据自己的需求选择可用的应用。但是,租户在部署与被删除的应用名称相同的应用时,可能会遇到错误。本教程演示了如何在删除应用后复用相同应用名称。 - -## 准备工作 - -- 您需要使用被邀请到项目中、且具有 `operator` 角色的帐户。本教程使用 `project-regular` 帐户进行演示。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 -- 您需要[启用应用商店](../../../pluggable-components/app-store/)。 - -## 复用相同应用名称 - -### 从应用商店中部署应用 - -1. 以 `project-regular` 身份登录 KubeSphere 的 Web 控制台,从应用商店中部署应用。本教程使用 Redis 作为示例应用,将应用名称设置为 `redis-1`。有关更多如何部署 Redis 的信息,请参考[在 KubeSphere 上部署 Redis](../../../application-store/built-in-apps/redis-app/)。 - - ![redis-1](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/redis-1.png) - -2. 点击该应用访问其详情页,然后点击**删除**以删除应用。 - - ![delete-redis-1](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-redis-1.png) - -### 复用相同应用名称 - -1. 如果您尝试使用与 `redis-1` 相同的应用名称来部署新的 Redis 应用,您将在右上角看到以下错误提示。 - - ![error-prompt](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/error-prompt.png) - -3. 
在项目中,访问**配置中心**下的**密钥**,在搜索栏中输入 `redis-1` 搜索密钥。 - - ![search-secret](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/search-secret.png) - -3. 点击该密钥以访问其详情页,点击**更多操作**从下拉菜单中选择**删除**。 - - ![delete-secret](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-secret.png) - -4. 在出现的对话框中,输入密钥名称,点击**确定**以删除密钥。 - - ![confirm-delete](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/confirm-delete.png) - -5. 现在,您就能用与 `redis-1` 相同的应用名称来部署新的 Redis 应用。 - - ![new-redis-app](/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/new-redis-app.png) \ No newline at end of file diff --git a/content/zh/docs/faq/console/change-console-language.md b/content/zh/docs/faq/console/change-console-language.md index 662972efd..513f8d38c 100644 --- a/content/zh/docs/faq/console/change-console-language.md +++ b/content/zh/docs/faq/console/change-console-language.md @@ -16,10 +16,10 @@ KubeSphere Web 控制台目前支持四种语言:简体中文、繁体中文 ## 更改控制台语言 -1. 登录 KubeSphere,点击右上角的帐户名称。 +1. 登录 KubeSphere,点击右上角的用户名。 2. 在下拉菜单中,选择**个人设置**。 3. 在**基本信息**页面,从**语言**下拉列表中选择所需的语言。 -4. 点击 保存设置。 \ No newline at end of file +4. 点击 保存设置。 \ No newline at end of file diff --git a/content/zh/docs/faq/console/edit-resources-in-system-workspace.md b/content/zh/docs/faq/console/edit-resources-in-system-workspace.md index 186dd4c8a..cb1f89965 100644 --- a/content/zh/docs/faq/console/edit-resources-in-system-workspace.md +++ b/content/zh/docs/faq/console/edit-resources-in-system-workspace.md @@ -18,7 +18,7 @@ Weight: 16520 ## 编辑控制台配置 -1. 以 `admin` 用户登录 KubeSphere,点击右下角的锤子图标,然后选择 **Kubectl**。 +1. 以 `admin` 用户登录 KubeSphere,点击右下角的 ,然后选择 **Kubectl**。 2. 执行如下命令: @@ -31,14 +31,14 @@ Weight: 16520 ```yaml client: version: - kubesphere: v3.0.0 - kubernetes: v1.17.9 - openpitrix: v0.3.5 + kubesphere: v3.2.1 + kubernetes: v1.21.5 + openpitrix: v3.2.1 enableKubeConfig: true systemWorkspace: "$" # 请手动添加此行。 ``` -4. 执行如下命令重新部署 `ks-console`,并等待 Pod 重建。 +4. 
执行如下命令重新部署 `ks-console`,并等待容器组重建。 ```bash kubectl -n kubesphere-system rollout restart deployment ks-console diff --git a/content/zh/docs/faq/devops/create-devops-kubeconfig-on-aws.md b/content/zh/docs/faq/devops/create-devops-kubeconfig-on-aws.md index b13a6255f..340c15cba 100644 --- a/content/zh/docs/faq/devops/create-devops-kubeconfig-on-aws.md +++ b/content/zh/docs/faq/devops/create-devops-kubeconfig-on-aws.md @@ -62,9 +62,9 @@ Weight: 16820 kubectl apply -f devops-deploy.yaml ``` -### 步骤 2:获取 ServiceAccount 令牌 +### 步骤 2:获取服务帐户令牌 -1. 运行以下命令获取 ServiceAccount 的令牌。 +1. 运行以下命令获取服务帐户的令牌。 ```bash export TOKEN_NAME=$(kubectl -n kubesphere-sample-dev get sa devops-deploy -o jsonpath='{.secrets[0].name}') @@ -77,9 +77,7 @@ Weight: 16820 ### 步骤 3:创建 DevOps kubeconfig -1. 登录 AWS 集群的 KubeSphere 控制台,访问您的 DevOps 工程。转到**工程管理**下的**凭证**,然后点击**创建**。您可以按需输入该 kubeconfig 的**凭证 ID**。 - - ![create-kubeconfig](/images/docs/zh-cn/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png) +1. 登录 AWS 集群的 KubeSphere 控制台,访问您的 DevOps 项目。转到 **DevOps 项目设置**下的**凭证**,然后点击**创建**。您可以按需输入该 kubeconfig 的**凭证 ID**。 2. 在 **Content** 文本框中,请注意以下内容: diff --git a/content/zh/docs/faq/devops/install-jenkins-plugins.md b/content/zh/docs/faq/devops/install-jenkins-plugins.md index 6ff4f3a83..9849ce7f4 100644 --- a/content/zh/docs/faq/devops/install-jenkins-plugins.md +++ b/content/zh/docs/faq/devops/install-jenkins-plugins.md @@ -25,12 +25,12 @@ KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能,而提 1. 运行以下命令获取 Jenkins 的地址。 ```bash - export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins) + export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins) export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}") echo http://$NODE_IP:$NODE_PORT ``` -2. 
您会得到类似如下的输出。您可以通过输出的地址使用自己的 KubeSphere 帐户和密码(例如 `admin/P@88w0rd`)访问 Jenkins 面板。 +2. 您会得到类似如下的输出。您可以通过输出的地址使用自己的 KubeSphere 用户和密码(例如 `admin/P@88w0rd`)访问 Jenkins 面板。 ``` http://192.168.0.4:30180 @@ -38,7 +38,7 @@ KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能,而提 {{< notice note >}} - 请确保使用自己的 Jenkins 地址。根据您 Kubernetes 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。 + 请确保使用自己的 Jenkins 地址。根据您 KubeSphere 集群部署位置的不同,您可能需要在安全组中打开端口,并配置相关的端口转发规则。 {{}} @@ -46,16 +46,10 @@ KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能,而提 1. 登录 Jenkins 面板,点击**系统管理**。 - ![click-manage-jenkins](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png) - 2. 在**系统管理**页面,下滑到**插件管理**并点击。 - ![click-manage-plugins](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png) - 3. 点击**可选插件**选项卡,您必须使用搜索框来搜索所需插件。例如,您可以在搜索框中输入 `git`,勾选所需插件旁边的复选框,然后按需点击**直接安装**或**下载待重启后安装**。 - ![available-plugins](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/available-plugins.png) - {{< notice note >}} Jenkins 的插件相互依赖。安装插件时,您可能还需要安装其依赖项。 @@ -64,16 +58,10 @@ KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能,而提 4. 如果已预先下载 HPI 文件,您也可以点击**高级**选项卡,上传该 HPI 文件作为插件进行安装。 - ![click-advanced-tab](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png) - 5. 在**已安装**选项卡,可以查看已安装的全部插件。能够安全卸载的插件将会在右侧显示**卸载**按钮。 - ![installed-plugins](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/installed-plugins.png) - 6. 
在**可更新**选项卡,先勾选插件左侧的复选框,再点击**下载待重启后安装**,即可安装更新的插件。您也可以点击**立即获取**按钮检查更新。 - ![update-plugins](/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/update-plugins.png) - ## 另请参见 [管理插件](https://www.jenkins.io/zh/doc/book/managing/plugins/) \ No newline at end of file diff --git a/content/zh/docs/faq/installation/configure-booster.md b/content/zh/docs/faq/installation/configure-booster.md index 9a1270979..b16a49e13 100644 --- a/content/zh/docs/faq/installation/configure-booster.md +++ b/content/zh/docs/faq/installation/configure-booster.md @@ -10,19 +10,7 @@ weight: 16200 ## 获取加速器地址 -您需要获取仓库的一个镜像地址以配置加速器。以下示例介绍如何从阿里云获取加速器地址。 - -1. 登录阿里云控制台,在搜索栏中输入“容器镜像服务”,点击搜索结果中的**容器镜像服务**。 - - ![container-registry.png](/images/docs/zh-cn/installing-on-linux/faq/configure-booster/container-registry.PNG) - -2. 点击**镜像加速器**。 - - ![image-booster](/images/docs/zh-cn/installing-on-linux/faq/configure-booster/image-booster.PNG) - -3. 在如图所示的位置获取**加速器地址**,并按照页面上提供的阿里云官方操作文档配置加速器。 - - ![booster-url](/images/docs/zh-cn/installing-on-linux/faq/configure-booster/booster-url.PNG) +您需要获取仓库的一个镜像地址以配置加速器。您可以参考如何[从阿里云获取加速器地址](https://www.alibabacloud.com/help/zh/doc-detail/60750.htm?spm=a2c63.p38356.b99.18.4f4133f0uTKb8S)。 ## 配置仓库镜像地址 diff --git a/content/zh/docs/faq/installation/telemetry.md b/content/zh/docs/faq/installation/telemetry.md index 8d3acc38a..9e57f07e6 100644 --- a/content/zh/docs/faq/installation/telemetry.md +++ b/content/zh/docs/faq/installation/telemetry.md @@ -29,7 +29,7 @@ Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes ### 安装前禁用 Telemetry -在现有 Kubernetes 集群上安装 KubeSphere 时,您需要下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件用于配置集群。如需禁用 Telemetry,请勿直接执行 `kubectl apply -f` 命令应用该文件。 +在现有 Kubernetes 集群上安装 KubeSphere 时,您需要下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件用于配置集群。如需禁用 Telemetry,请勿直接执行 `kubectl apply 
-f` 命令应用该文件。 {{< notice note >}} @@ -37,7 +37,7 @@ Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes {{}} -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件并编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件并编辑。 ```bash vi cluster-configuration.yaml @@ -57,7 +57,7 @@ Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes 3. 保存文件并执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -66,7 +66,7 @@ Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes 1. 以 `admin` 用户登录控制台,点击页面左上角的**平台管理**。 -2. 选择**集群管理**,在左侧导航栏中点击**自定义资源 CRD**。 +2. 选择**集群管理**,在左侧导航栏中点击 **CRD**。 {{< notice note >}} 如果[多集群功能](../../../multicluster-management/)已经启用,您需要先选择一个集群。 @@ -74,9 +74,9 @@ Telemetry 收集已安装 KubeSphere 集群的大小、KubeSphere 和 Kubernetes 3. 在搜索框中输入 `clusterconfiguration`,点击搜索结果打开详情页。 -4. 点击 `ks-installer` 右侧的 ,并选择**编辑配置文件**。 +4. 点击 `ks-installer` 右侧的 ,并选择**编辑 YAML**。 -5. 在文件末尾添加 `telemetry_enabled: false` 字段,点击**更新**。 +5. 
在文件末尾添加 `telemetry_enabled: false` 字段,点击**确定**。 {{< notice note >}} diff --git a/content/zh/docs/faq/multi-cluster-management/_index.md b/content/zh/docs/faq/multi-cluster-management/_index.md index b19fa3adc..57f23b873 100644 --- a/content/zh/docs/faq/multi-cluster-management/_index.md +++ b/content/zh/docs/faq/multi-cluster-management/_index.md @@ -1,7 +1,7 @@ --- title: "多集群管理" -keywords: 'Kubernetes, KubeSphere, 多集群管理, Host Cluster, Member Cluster' -description: 'Faq about multi-cluster management in KubeSphere' +keywords: 'Kubernetes, KubeSphere, 多集群管理, 主集群, 成员集群' +description: 'KubeSphere 多集群管理常见问题' layout: "second" weight: 16700 --- diff --git a/content/zh/docs/faq/multi-cluster-management/host-cluster-access-member-cluster.md b/content/zh/docs/faq/multi-cluster-management/host-cluster-access-member-cluster.md index fddca54e3..8f66b661b 100644 --- a/content/zh/docs/faq/multi-cluster-management/host-cluster-access-member-cluster.md +++ b/content/zh/docs/faq/multi-cluster-management/host-cluster-access-member-cluster.md @@ -1,18 +1,18 @@ --- -title: "恢复 Host 集群对 Member 集群的访问权限" -keywords: "Kubernetes, KubeSphere, 多集群, Host 集群, Member 集群" -description: "了解如何恢复 Host 集群对 Member 集群的访问。" -linkTitle: "恢复 Host 集群对 Member 集群的访问权限" +title: "恢复主集群对成员集群的访问权限" +keywords: "Kubernetes, KubeSphere, 多集群, 主集群, 成员集群" +description: "了解如何恢复主集群对成员集群的访问。" +linkTitle: "恢复主集群对成员集群的访问权限" Weight: 16720 --- -[多集群管理](../../../multicluster-management/introduction/kubefed-in-kubesphere/)是 KubeSphere 的一大特色,拥有必要权限的租户(通常是集群管理员)能够从 Host 集群访问中央控制平面,以管理全部 Member 集群。强烈建议您通过 Host 集群管理整个集群的资源。 +[多集群管理](../../../multicluster-management/introduction/kubefed-in-kubesphere/)是 KubeSphere 的一大特色,拥有必要权限的租户(通常是集群管理员)能够从主集群访问中央控制平面,以管理全部成员集群。强烈建议您通过主集群管理整个集群的资源。 -本教程演示如何恢复 Host 集群对 Member 集群的访问权限。 +本教程演示如何恢复主集群对成员集群的访问权限。 ## 可能出现的错误信息 -如果您无法从中央控制平面访问 Member 集群,并且浏览器一直将您重新定向到 KubeSphere 的登录页面,请在该 Member 集群上运行以下命令来获取 ks-apiserver 的日志。 +如果您无法从中央控制平面访问成员集群,并且浏览器一直将您重新定向到 KubeSphere 的登录页面,请在该成员集群上运行以下命令来获取 
ks-apiserver 的日志。 ``` kubectl -n kubesphere-system logs ks-apiserver-7c9c9456bd-qv6bs @@ -20,7 +20,7 @@ kubectl -n kubesphere-system logs ks-apiserver-7c9c9456bd-qv6bs {{< notice note >}} -`ks-apiserver-7c9c9456bd-qv6bs` 指的是该 Member 集群上的 Pod ID。请确保您使用自己的 Pod ID。 +`ks-apiserver-7c9c9456bd-qv6bs` 指的是该成员集群上的容器组 ID。请确保您使用自己的容器组 ID。 {{}} @@ -42,7 +42,7 @@ E0305 03:47:34.502764 1 authentication.go:60] Unable to authenticate the r ### 步骤 1:验证 jwtSecret -分别在 Host 集群和 Member 集群上运行以下命令,确认它们的 jwtSecret 是否相同。 +分别在主集群和成员集群上运行以下命令,确认它们的 jwtSecret 是否相同。 ``` kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v “apiVersion” | grep jwtSecret @@ -50,7 +50,7 @@ kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v “apiVe ### 步骤 2:更改 `accessTokenMaxAge` -请确保 Host 集群和 Member 集群的 jwtSecret 相同,然后在该 Member 集群上运行以下命令获取 `accessTokenMaxAge` 的值。 +请确保主集群和成员集群的 jwtSecret 相同,然后在该成员集群上运行以下命令获取 `accessTokenMaxAge` 的值。 ``` kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep accessTokenMaxAge @@ -68,4 +68,4 @@ kubectl -n kubesphere-system edit cm kubesphere-config -o yaml kubectl -n kubesphere-system rollout restart deploy ks-apiserver ``` -现在,您可以再次从中央控制平面访问该 Member 集群。 \ No newline at end of file +现在,您可以再次从中央控制平面访问该成员集群。 \ No newline at end of file diff --git a/content/zh/docs/faq/multi-cluster-management/manage-multi-cluster.md b/content/zh/docs/faq/multi-cluster-management/manage-multi-cluster.md index 908bfa8e0..d50fc8330 100644 --- a/content/zh/docs/faq/multi-cluster-management/manage-multi-cluster.md +++ b/content/zh/docs/faq/multi-cluster-management/manage-multi-cluster.md @@ -11,50 +11,50 @@ KubeSphere 提供了易于使用的多集群功能,帮助您[在 KubeSphere ## 准备工作 -- 请确保您的 Kubernetes 集群在用作 Host 集群和 Member 集群之前已安装 KubeSphere。 -- 请确保 Host 集群和 Member 集群分别设置了正确的集群角色,并且在 Host 集群和 Member 集群上的 `jwtSecret` 也相同。 -- 建议 Member 集群在导入 Host 集群之前是干净环境,即没有创建任何资源。 +- 请确保您的 Kubernetes 集群在用作主集群和成员集群之前已安装 KubeSphere。 +- 请确保主集群和成员集群分别设置了正确的集群角色,并且在主集群和成员集群上的 
`jwtSecret` 也相同。 +- 建议成员集群在导入主集群之前是干净环境,即没有创建任何资源。 ## 管理 KubeSphere 多集群环境 -当您在 KubeSphere 上创建多集群环境之后,您可以通过 Host 集群的中央控制平面管理该环境。在创建资源的时候,您可以选择一个特定的集群,但是需要避免您的 Host 集群过载。不建议您登录 Member 集群的 KubeSphere Web 控制台去创建资源,因为部分资源(例如:企业空间)将不会同步到您的 Host 集群进行管理。 +当您在 KubeSphere 上创建多集群环境之后,您可以通过主集群的中央控制平面管理该环境。在创建资源的时候,您可以选择一个特定的集群,但是需要避免您的主集群过载。不建议您登录成员集群的 KubeSphere Web 控制台去创建资源,因为部分资源(例如:企业空间)将不会同步到您的主集群进行管理。 ### 资源管理 -不建议您将 Host 集群转换成 Member 集群或将 Member 集群转换成 Host 集群。如果一个 Member 集群曾经被导入进 Host 集群,您将该 Member 集群从先前的 Host 集群解绑后,再导入进新的 Host 集群时必须使用相同的集群名称。 +不建议您将主集群转换为成员集群,或将成员集群转换成主集群。如果一个成员集群曾经被导入进主集群,您将该成员集群从先前的主集群解绑后,再导入进新的主集群时必须使用相同的集群名称。 -如果您想在将 Member 集群导入新的 Host 集群时保留现有项目(即命名空间),请按照以下步骤进行操作。 +如果您想在将成员集群导入新的主集群时保留现有项目,请按照以下步骤进行操作。 -1. 在 Member 集群上运行以下命令将需要保留的项目从企业空间解绑。 +1. 在成员集群上运行以下命令将需要保留的项目从企业空间解绑。 ```bash kubectl label ns kubesphere.io/workspace- && kubectl patch ns -p '{"metadata":{"ownerReferences":[]}}' --type=merge ``` -2. 在 Member 集群运行以下命令清除您的企业空间。 +2. 在成员集群运行以下命令清除您的企业空间。 ```bash kubectl delete workspacetemplate ``` -3. 当您在 Host 集群中创建新的企业空间,并将 Member 集群分配到这个企业空间时,请在 Member 集群运行以下命令将保留的项目绑定至新的企业空间。 +3. 
当您在主集群中创建新的企业空间,并将成员集群分配到这个企业空间时,请在成员集群运行以下命令将保留的项目绑定至新的企业空间。 ```bash kubectl label ns kubesphere.io/workspace= ``` -### 帐户管理 +### 用户管理 -您通过 Host 集群的中央控制平面创建的帐户会被同步至 Member 集群。 +您通过主集群的中央控制平面创建的用户会被同步至成员集群。 -如果您希望让不同的帐访问不同的集群,您可以创建企业空间并[赋予他们不同的集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 在此之后,您可以根据这些帐户的访问要求,邀请不同的帐户至这些企业空间。在未来版本中,您可以邀请帐户至[多集群项目](../../../project-administration/project-and-multicluster-project/#多集群项目)中。 +如果您希望让不同的用户访问不同的集群,您可以创建企业空间并[赋予他们不同的集群](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 在此之后,您可以根据这些用户的访问要求,邀请不同的用户至这些企业空间。 ### KubeSphere 组件管理 -KubeSphere 提供了一些可插拔组件,您可以根据需要去启用。在多集群环境下,您可以在 Host 集群或 Member 集群上启用这些组件。 +KubeSphere 提供了一些可插拔组件,您可以根据需要去启用。在多集群环境下,您可以在主集群或成员集群上启用这些组件。 -例如,您只需在 Host 集群上启用应用商店,就可以直接在 Member 集群上使用与应用商店相关的功能。对于其他组件,当您在 Host 集群上启用它们时,仍然需要在 Member 集群上手动启用相同组件以实现相同的功能。此外,您还可以仅在 Member 集群上启用组件,以便仅在 Member 集群上实现相应的功能。 +例如,您只需在主集群上启用应用商店,就可以直接在成员集群上使用与应用商店相关的功能。对于其他组件,当您在主集群上启用它们时,仍然需要在成员集群上手动启用相同组件以实现相同的功能。此外,您还可以仅在成员集群上启用组件,以便仅在成员集群上实现相应的功能。 有关如何启用可插拔组件的更多信息,请参考[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/faq/observability/byop.md b/content/zh/docs/faq/observability/byop.md index afd140828..8fb766bdf 100644 --- a/content/zh/docs/faq/observability/byop.md +++ b/content/zh/docs/faq/observability/byop.md @@ -113,7 +113,7 @@ KubeSphere 3.0 使用 Prometheus Operator 来管理 Prometheus/Alertmanager 配 如果您的 Prometheus 堆栈不是由 Prometheus Operator 进行管理,您可以跳过此步骤。但请务必确保: -- 您必须将 [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) 和 [PrometheusRule for ETCD](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) 中的记录/告警规则复制至您的 Prometheus 配置中,以便 KubeSphere 3.0 能够正常运行。 +- 您必须将 [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) 和 [PrometheusRule for 
etcd](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) 中的记录/告警规则复制至您的 Prometheus 配置中,以便 KubeSphere 3.0 能够正常运行。 - 配置您的 Prometheus,使其抓取指标的目标 (Target) 与 [KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml) 中列出的 ServiceMonitor 的目标相同。 diff --git a/content/zh/docs/faq/observability/logging.md b/content/zh/docs/faq/observability/logging.md index 0939942e3..7c7f9fc77 100644 --- a/content/zh/docs/faq/observability/logging.md +++ b/content/zh/docs/faq/observability/logging.md @@ -16,6 +16,7 @@ weight: 16310 - [工具箱显示今天没有日志记录](../../observability/logging/#工具箱显示今天没有日志记录) - [在工具箱中查看日志时,报告内部服务器错误](../../observability/logging/#在工具箱中查看日志时报告内部服务器错误) - [如何让 KubeSphere 只收集指定工作负载的日志](../../observability/logging/#如何让-kubesphere-只收集指定工作负载的日志) +- [在查看容器实时日志的时候,控制台上看到的实时日志要比 kubectl log -f xxx 看到的少](../../observability/logging/#在查看容器实时日志的时候控制台上看到的实时日志要比-kubectl-log--f-xxx-看到的少) ## 如何将日志存储改为外部 Elasticsearch 并关闭内部 Elasticsearch @@ -27,7 +28,7 @@ weight: 16310 kubectl edit cc -n kubesphere-system ks-installer ``` -2. 将 `es.elasticsearchDataXXX`、`es.elasticsearchMasterXXX` 和 `status.logging` 的注释取消,将 `es.externalElasticsearchUrl` 设置为 Elasticsearch 的地址,将 `es.externalElasticsearchPort` 设置为其端口号。以下示例供您参考: +2. 将 `es.elasticsearchDataXXX`、`es.elasticsearchMasterXXX` 和 `status.logging` 的注释取消,将 `es.externalElasticsearchHost` 设置为 Elasticsearch 的地址,将 `es.externalElasticsearchPort` 设置为其端口号。以下示例供您参考: ```yaml apiVersion: installer.kubesphere.io/v1alpha1 @@ -46,7 +47,7 @@ weight: 16310 # elasticsearchMasterVolumeSize: 4Gi elkPrefix: logstash logMaxAge: 7 - externalElasticsearchUrl: <192.168.0.2> + externalElasticsearchHost: <192.168.0.2> externalElasticsearchPort: <9200> ... 
status: @@ -161,3 +162,10 @@ kubectl edit input -n kubesphere-logging-system tail 更新 `Input.Spec.Tail.ExcludePath` 字段。例如,将路径设置为 `/var/log/containers/*_kube*-system_*.log`,以排除系统组件的全部日志。 有关更多信息,请参见 [Fluent Bit Operator](https://github.com/kubesphere/fluentbit-operator)。 + +## 在查看容器实时日志的时候,控制台上看到的实时日志要比 kubectl log -f xxx 看到的少 + +主要有以下几个原因: + +- 当实时去查看容器日志时,Kubernetes 是分 chunk 形式返回,Kubernetes 大概 2 分钟左右会返回一次数据,比较慢 +- 未开启‘实时查看’时看到的末尾部分,在实时查看时,被划分在下次返回的部分中,现象看起来像是日志缺失 diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md index 2459129ed..e7d11cf4e 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke.md @@ -42,13 +42,13 @@ Server Version: version.Info{Major:"1", Minor:"18+", GitVersion:"v1.18.4-tke.2", - 使用 kubectl 执行以下命令安装 KubeSphere: ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` - 下载集群配置文件 ```bash -wget https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` {{< notice tip >}} @@ -105,8 +105,6 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app= - 由于服务安装时默认开启 NodePort 且端口为 30880,浏览器输入 `<公网 IP>:30880` ,并以默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录控制台。 -![console.png](/images/docs/tencent-tke/console.png) - #### LoadBalancer 方式访问 - 在 `容器服务` > `集群` 界面中,选择创建好的集群,在 `服务与路由` > `service` 面板中,点击 `ks-console` 一行中 `更新访问方式`。 @@ -123,8 +121,6 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app= - 浏览器输入 `:<映射端口>`,并以默认帐户(用户名 
`admin`,密码 `P@88w0rd`)即可登录控制台。 -![console.png](/images/docs/tencent-tke/console.png) - {{< notice tip >}} @@ -140,5 +136,4 @@ kubectl -n kubesphere-system rollout restart deploy ks-controller-manager ### 通过 KubeSphere 开启附加组件 以上示例演示了默认的最小安装过程,要在 KubeSphere 中启用其他组件,请参阅[启用可插拔组件](../../../pluggable-components/)。 -全部附加组件开启并安装成功后,进入集群管理界面,可以得到如下界面呈现效果,特别是在 `服务组件` 部分可以看到已经开启的各个基础和附加组件: -![console-full.png](/images/docs/tencent-tke/console-full.png) +全部附加组件开启并安装成功后,进入集群管理界面,在**系统组件**区域可以看到已经开启的各个基础和附加组件。 diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md index f843c44e1..ac6e35513 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack.md @@ -142,8 +142,8 @@ alicloud-disk-topology diskplugin.csi.alibabacloud.com Delete 1.使用 [ks-installer](https://github.com/kubesphere/ks-installer) 在已有的 Kubernetes 集群上来部署 KubeSphere,下载 YAML 文件: ``` -wget https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml -wget https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml +wget https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` @@ -209,9 +209,7 @@ https://kubesphere.io 2020-xx-xx xx:xx:xx 现在已经安装了 KubeSphere,您可以按照以下步骤访问 KubeSphere 的 Web 控制台。 -- 切换到 kubesphere-system 命名空间,选择服务,选择 ks-console 点击更新 - - ![ks-console](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-console.png) +- 切换到 kubesphere-system 命名空间,选择服务,选择 ks-console 点击更新 。 - 将 service 类型 `NodePort` 更改为 `LoadBalancer` ,完成后点击更新。 @@ -221,9 +219,8 @@ https://kubesphere.io 2020-xx-xx xx:xx:xx 
![ack-lb-ip](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ack-lb-ip.png) -- 使用 ACK 生成的 external-ip 访问 KubeSphere 的 Web 控制台, 默认帐户和密码(`admin/P@88w0rd`),在集群概述页面中,可以看到如下图所示的仪表板。 +- 使用 ACK 生成的 external-ip 访问 KubeSphere 的 Web 控制台, 默认帐户和密码(`admin/P@88w0rd`)。 - ![ks-UI](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-UI.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md index 330cecb87..33226376c 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md @@ -98,9 +98,9 @@ Azure Kubernetes Services 本身将放置在`KubeSphereRG`中。 请使用以下命令开始部署 KubeSphere。 ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 可以通过以下命令检查安装日志: @@ -147,9 +147,7 @@ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ks-console LoadBalancer 10.0.181.93 13.86.xxx.xxx 80:30194/TCP 13m 6379/TCP 10m ``` -使用 external-ip 地址用默认帐户和密码(admin/P@88w0rd)访问控制台。在集群概述页面中,您可以看到如下图所示的仪表板。 - -![aks-cluster](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-cluster.png) +使用 external-ip 地址用默认帐户和密码(admin/P@88w0rd)访问控制台。 ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md 
index 754073a58..136f9516c 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md @@ -30,7 +30,7 @@ weight: 4230 {{< notice note >}} -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 此示例中包括 3 个节点。您可以根据自己的需求添加更多节点,尤其是在生产环境中。 - 机器类型 Standard/4 GB/2 vCPU 仅用于最小化安装的,如果您计划启用多个可插拔组件或将集群用于生产,建议将节点升级到规格更大的类型(例如,CPU-Optimized /8 GB /4 vCPUs)。DigitalOcean 是基于工作节点类型来配置主节点,而对于标准节点,API server 可能会很快会变得无响应。 @@ -47,9 +47,9 @@ weight: 4230 - 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - 检查安装日志: @@ -108,9 +108,8 @@ https://kubesphere.io 2020-xx-xx xx:xx:xx {{}} -- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。在集群概述页面中,可以看到如下图所示的仪表板。 +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 - ![doks-cluster](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/doks-cluster.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md index af8a1acdb..9a1baa901 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md @@ -60,7 +60,7 @@ aws-cli/2.1.2 Python/3.7.3 
Linux/4.18.0-193.6.3.el8_2.x86_64 exe/x86_64.centos.8 - 私有:仅启用对集群的 Kubernetes API server 端点的专用访问。来自集群 VPC 内部的 Kubernetes API 请求使用这个私有 VPC 端点。 {{< notice note >}} -如果创建的 VPC 没有出站 Internet 访问,则必须启用私有访问。 + 如果创建的 VPC 没有出站 Internet 访问,则必须启用私有访问。 {{}} - 公有和私有:启用公有和私有访问。 @@ -84,7 +84,7 @@ aws-cli/2.1.2 Python/3.7.3 Linux/4.18.0-193.6.3.el8_2.x86_64 exe/x86_64.centos.8 {{< notice note >}} -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 此示例中包括 3 个节点。您可以根据自己的需求添加更多节点,尤其是在生产环境中。 - t3.medium(2 个 vCPU,4 GB 内存)机器类型仅用于最小化安装,如果要启用可插拔组件或集群用于生产,请选择具有更大规格的机器类型。 - 对于其他设置,您也可以根据自己的需要进行更改,也可以使用默认值。 @@ -130,9 +130,9 @@ aws-cli/2.1.2 Python/3.7.3 Linux/4.18.0-193.6.3.el8_2.x86_64 exe/x86_64.centos.8 - 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - 检查安装日志: @@ -206,9 +206,8 @@ aws-cli/2.1.2 Python/3.7.3 Linux/4.18.0-193.6.3.el8_2.x86_64 exe/x86_64.centos.8 - 使用 EKS 生成的 external-ip 访问 KubeSphere 的 Web 控制台。 -- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台,在集群概述页面中,可以看到如下图所示的仪表板。 +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 - ![eks-cluster](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/gke-cluster.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md index 433aa9eb6..808ec5f4e 100644 --- 
a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md @@ -30,7 +30,7 @@ weight: 4240 {{< notice note >}} -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 此示例中包括3个节点,可以根据自己的需求添加更多节点,尤其是在生产环境中。 - 最小安装的机器类型为 e2-medium(2 个 vCPU,4GB 内存)。如果要启用可插拔组件或将集群用于生产,请选择具有更高配置的机器类型。 - 对于其他设置,可以根据自己的需要进行更改,也可以使用默认值。 @@ -46,9 +46,9 @@ weight: 4240 - 使用 kubectl 安装 KubeSphere,以下命令仅用于默认的最小安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` - 检查安装日志: @@ -99,9 +99,8 @@ weight: 4240 {{}} -- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台,在集群概述页面中,可以看到如下图所示的仪表板。 +- 使用默认帐户和密码(`admin/P@88w0rd`)登录控制台。 - ![gke-cluster](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/gke-cluster.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md index a6f306d43..870e8b566 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce.md @@ -15,7 +15,7 @@ weight: 4250 首先按使用环境的资源需求创建 Kubernetes 集群,满足以下一些条件即可(如已有环境并满足条件可跳过本节内容): -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 
Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 需要确保 Kubernetes 集群所使用的云主机的网络正常工作,可以通过在创建集群的同时**自动创建**或**使用已有**弹性 IP;或者在集群创建后自行配置网络(如配置 [NAT 网关](https://support.huaweicloud.com/natgateway/))。 - 工作节点规格建议选择 `s3.xlarge.2` 的 `4核|8GB` 配置,并按需扩展工作节点数量(通常生产环境需要 3 个及以上工作节点)。 @@ -74,8 +74,8 @@ volumeBindingMode: Immediate 接下来就可以使用 [ks-installer](https://github.com/kubesphere/ks-installer) 在已有的 Kubernetes 集群上来部署 KubeSphere,建议首先还是以最小功能集进行安装,可执行以下命令: ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 执行部署命令后,可以通过进入**工作负载** > **容器组 Pod** 界面,在右侧面板中查询 `kubesphere-system` 命名空间下的 Pod 运行状态了解 KubeSphere 平台最小功能集的部署状态;通过该命名空间下 `ks-console-xxxx` 容器的状态来了解 KubeSphere 控制台应用的可用状态。 @@ -86,7 +86,7 @@ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3 通过 `kubesphere-system` 命名空间下的 Pod 运行状态确认 KubeSphere 基础组件都已进入运行状态后,我们需要为 KubeSphere 控制台开启外网访问。 -进入**资源管理** > **网络管理**,在右侧面板中选择 `ks-console` 更改网络访问方式,建议选用 `负载均衡(LoadBalancer` 访问方式(需绑定弹性公网 IP),配置完成后如下图: +进入**资源管理** > **网络**,在右侧面板中选择 `ks-console` 更改网络访问方式,建议选用 `负载均衡(LoadBalancer)` 访问方式(需绑定弹性公网 IP),配置完成后如下图: ![开启 KubeSphere 外网访问](/images/docs/huawei-cce/zh/expose-ks-console.png) @@ -94,9 +94,7 @@ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3 ![为 KubeSphere 控制台配置负载均衡访问](/images/docs/huawei-cce/zh/edit-ks-console-svc.png) -通过负载均衡绑定公网访问后,即可使用给定的访问地址进行访问,进入到 KubeSphere 的登录界面并使用默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录平台: -![登录 KubeSphere 平台](/images/docs/huawei-cce/zh/login-ks-console.png)
+通过负载均衡绑定公网访问后,即可使用给定的访问地址进行访问,进入到 KubeSphere 的登录界面并使用默认帐户(用户名 `admin`,密码 `P@88w0rd`)即可登录平台。 ### 通过 KubeSphere 开启附加组件 @@ -104,7 +102,7 @@ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3 {{< notice warning >}} -在开启 Istio 组件之前,由于自定义资源定义 (CRD) 冲突的问题,需要先删除华为 CCE 自带的 `applications.app.k8s.io` ,最直接的方式是通过 kubectl 工具来完成: +在开启 Istio 组件之前,由于定制资源定义(CRD)冲突的问题,需要先删除华为 CCE 自带的 `applications.app.k8s.io` ,最直接的方式是通过 kubectl 工具来完成: ```bash kubectl delete crd applications.app.k8s.io @@ -112,6 +110,4 @@ kubectl delete crd applications.app.k8s.io {{}} -全部附加组件开启并安装成功后,进入集群管理界面,可以得到如下界面呈现效果,特别是在 `服务组件` 部分可以看到已经开启的各个基础和附加组件: - -![KubeSphere 全功能集管理界面](/images/docs/huawei-cce/zh/view-ks-console-full.png) +全部附加组件开启并安装成功后,进入集群管理界面,在**系统组件** 区域可以看到已经开启的各个基础和附加组件。 diff --git a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md index f50fcc903..0579ecd1d 100644 --- a/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md +++ b/content/zh/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md @@ -28,7 +28,7 @@ weight: 4260 {{< notice note >}} -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 建议您在**可见性类型**中选择**公共**,即每个节点会分配到一个公共 IP 地址,此地址之后可用于访问 KubeSphere Web 控制台。 - 在 Oracle Cloud 中,**配置**定义了一个实例会分配到的 CPU 和内存等资源量,本示例使用 `VM.Standard.E2.2 (2 CPUs and 16G Memory)`。有关更多信息,请参见 [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard)。 - 本示例包含 3 个节点,可以根据需求自行添加节点(尤其是生产环境)。 @@ -55,8 +55,8 @@ weight: 4260 ![cloud-shell-oke](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/cloud-shell-oke.jpg) - {{< notice warning >}} -如果不在 Cloud Shell 
中执行该命令,您无法继续进行以下操作。 + {{< notice warning >}}如果不在 Cloud Shell 中执行该命令,您无法继续进行以下操作。 + {{}} ## 在 OKE 上安装 KubeSphere @@ -64,9 +64,9 @@ weight: 4260 1. 使用 kubectl 安装 KubeSphere。直接输入以下命令会默认执行 KubeSphere 的最小化安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 2. 检查安装日志: @@ -81,11 +81,11 @@ weight: 4260 ##################################################### ### Welcome to KubeSphere! ### ##################################################### - + Console: http://10.0.10.2:30880 Account: admin Password: P@88w0rd - + NOTES: 1. After logging into the console, please check the monitoring status of service components in @@ -93,7 +93,7 @@ weight: 4260 ready, please wait patiently until all components are ready. 2. Please modify the default password after login. - + ##################################################### https://kubesphere.io 20xx-xx-xx xx:xx:xx ``` @@ -136,7 +136,6 @@ KubeSphere 安装完成后,您可以通过 `NodePort` 或 `LoadBalancer` 的 6. 访问此外部 IP 地址并通过默认的帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。在**集群管理**页面,您可以看到集群概览。 - ![概览页面](/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/概览页面.jpg) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/introduction/overview.md b/content/zh/docs/installing-on-kubernetes/introduction/overview.md index 31aa591c8..26d23e848 100644 --- a/content/zh/docs/installing-on-kubernetes/introduction/overview.md +++ b/content/zh/docs/installing-on-kubernetes/introduction/overview.md @@ -32,9 +32,9 @@ KubeSphere 承诺为用户提供即插即用架构,您可以轻松地将 KubeS 1. 
执行以下命令以开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 2. 检查安装日志: @@ -51,7 +51,6 @@ KubeSphere 承诺为用户提供即插即用架构,您可以轻松地将 KubeS 4. 确保在安全组中打开了 30880 端口,通过 NodePort (`IP:30880`) 使用默认帐户和密码 (`admin/P@88w0rd`) 访问 Web 控制台。 - ![kubesphere-console](/images/docs/zh-cn/installing-on-kubernetes/introduction/login.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/installing-on-kubernetes/introduction/prerequisites.md b/content/zh/docs/installing-on-kubernetes/introduction/prerequisites.md index ad7ec3ef2..e822e815f 100644 --- a/content/zh/docs/installing-on-kubernetes/introduction/prerequisites.md +++ b/content/zh/docs/installing-on-kubernetes/introduction/prerequisites.md @@ -10,7 +10,7 @@ weight: 4120 您可以在虚拟机和裸机上安装 KubeSphere,并同时配置 Kubernetes。另外,只要 Kubernetes 集群满足以下前提条件,那么您也可以在云托管和本地 Kubernetes 集群上部署 KubeSphere。 -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:v1.19.x,v1.20.x,v1.21.x 或 v1.22.x(实验性支持)。 - 可用 CPU > 1 核;内存 > 2 G。 - Kubernetes 集群已配置**默认** StorageClass(请使用 `kubectl get sc` 进行确认)。 - 使用 `--cluster-signing-cert-file` 和 `--cluster-signing-key-file` 参数启动集群时,kube-apiserver 将启用 CSR 签名功能。请参见 [RKE 安装问题](https://github.com/kubesphere/kubesphere/issues/1925#issuecomment-591698309)。 diff --git a/content/zh/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md b/content/zh/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md index 655b34d25..f942a1d1d 100644 --- 
a/content/zh/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md +++ b/content/zh/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped.md @@ -90,19 +90,19 @@ Docker 使用 `/var/lib/docker` 作为默认路径来存储所有 Docker 相关 1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} - 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/#kubesphere-v310-镜像清单)。 + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/#kubesphere-321-镜像清单)。 {{}} 2. 下载 `offline-installation-tool.sh`。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. 使 `.sh` 文件可执行。 @@ -162,8 +162,8 @@ Docker 使用 `/var/lib/docker` 作为默认路径来存储所有 Docker 相关 1. 执行以下命令下载这两个文件,并将它们传输至您充当任务机的机器,用于安装。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` 2.
编辑 `cluster-configuration.yaml` 添加您的私有镜像仓库。例如,本教程中的仓库地址是 `dockerhub.kubekey.local`,将它用作 `.spec.local_registry` 的值,如下所示: @@ -239,164 +239,157 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx {{}} -![登录 kubesphere](/images/docs/zh-cn/installing-on-kubernetes/installing-on-on-premises-kubernetes/air-gapped-installation/kubesphere-login.PNG) - ## 附录 -### KubeSphere v3.1.1 镜像清单 +### KubeSphere 3.2.1 镜像清单 ```txt ##k8s-images -kubesphere/kube-apiserver:v1.20.6 -kubesphere/kube-scheduler:v1.20.6 -kubesphere/kube-proxy:v1.20.6 -kubesphere/kube-controller-manager:v1.20.6 -kubesphere/kube-apiserver:v1.19.8 -kubesphere/kube-scheduler:v1.19.8 -kubesphere/kube-proxy:v1.19.8 -kubesphere/kube-controller-manager:v1.19.8 +kubesphere/kube-apiserver:v1.22.1 +kubesphere/kube-controller-manager:v1.22.1 +kubesphere/kube-proxy:v1.22.1 +kubesphere/kube-scheduler:v1.22.1 +kubesphere/kube-apiserver:v1.21.5 +kubesphere/kube-controller-manager:v1.21.5 +kubesphere/kube-proxy:v1.21.5 +kubesphere/kube-scheduler:v1.21.5 +kubesphere/kube-apiserver:v1.20.10 +kubesphere/kube-controller-manager:v1.20.10 +kubesphere/kube-proxy:v1.20.10 +kubesphere/kube-scheduler:v1.20.10 kubesphere/kube-apiserver:v1.19.9 -kubesphere/kube-scheduler:v1.19.9 -kubesphere/kube-proxy:v1.19.9 kubesphere/kube-controller-manager:v1.19.9 -kubesphere/kube-apiserver:v1.18.8 -kubesphere/kube-scheduler:v1.18.8 -kubesphere/kube-proxy:v1.18.8 -kubesphere/kube-controller-manager:v1.18.8 -kubesphere/kube-apiserver:v1.17.9 -kubesphere/kube-scheduler:v1.17.9 -kubesphere/kube-proxy:v1.17.9 -kubesphere/kube-controller-manager:v1.17.9 -kubesphere/pause:3.1 -kubesphere/pause:3.2 -kubesphere/etcd:v3.4.13 -calico/cni:v3.16.3 -calico/kube-controllers:v3.16.3 -calico/node:v3.16.3 -calico/pod2daemon-flexvol:v3.16.3 -calico/typha:v3.16.3 +kubesphere/kube-proxy:v1.19.9 +kubesphere/kube-scheduler:v1.19.9 +kubesphere/pause:3.5 +kubesphere/pause:3.4.1 +coredns/coredns:1.8.0 +calico/cni:v3.20.0 +calico/kube-controllers:v3.20.0 +calico/node:v3.20.0 
+calico/pod2daemon-flexvol:v3.20.0 +calico/typha:v3.20.0 kubesphere/flannel:v0.12.0 -coredns/coredns:1.6.9 -kubesphere/k8s-dns-node-cache:1.15.12 openebs/provisioner-localpv:2.10.1 openebs/linux-utils:2.10.0 -kubesphere/nfs-client-provisioner:v3.1.0-k8s1.11 -##csi-images -csiplugin/csi-neonsan:v1.2.0 -csiplugin/csi-neonsan-ubuntu:v1.2.0 -csiplugin/csi-neonsan-centos:v1.2.0 -csiplugin/csi-provisioner:v1.5.0 -csiplugin/csi-attacher:v2.1.1 -csiplugin/csi-resizer:v0.4.0 -csiplugin/csi-snapshotter:v2.0.1 -csiplugin/csi-node-driver-registrar:v1.2.0 -csiplugin/csi-qingcloud:v1.2.0 +kubesphere/k8s-dns-node-cache:1.15.12 ##kubesphere-images -kubesphere/ks-apiserver:v3.1.1 -kubesphere/ks-console:v3.1.1 -kubesphere/ks-controller-manager:v3.1.1 -kubesphere/ks-installer:v3.1.1 +kubesphere/ks-installer:v3.2.1 +kubesphere/ks-apiserver:v3.2.1 +kubesphere/ks-console:v3.2.1 +kubesphere/ks-controller-manager:v3.2.1 kubesphere/kubectl:v1.20.0 -kubesphere/kubectl:v1.19.0 -redis:5.0.12-alpine -alpine:3.14 -haproxy:2.0.22-alpine -nginx:1.14-alpine +kubesphere/kubefed:v0.8.1 +kubesphere/tower:v0.2.0 minio/minio:RELEASE.2019-08-07T01-59-21Z minio/mc:RELEASE.2019-08-07T23-14-43Z +csiplugin/snapshot-controller:v4.0.0 +kubesphere/nginx-ingress-controller:v0.48.1 mirrorgooglecontainers/defaultbackend-amd64:1.4 -kubesphere/nginx-ingress-controller:v0.35.0 -osixia/openldap:1.3.0 -csiplugin/snapshot-controller:v3.0.3 -kubesphere/kubefed:v0.7.0 -kubesphere/tower:v0.2.0 -kubesphere/prometheus-config-reloader:v0.42.1 -kubesphere/prometheus-operator:v0.42.1 -prom/alertmanager:v0.21.0 -prom/prometheus:v2.26.0 -prom/node-exporter:v0.18.1 -kubesphere/ks-alerting-migration:v3.1.0 -jimmidyson/configmap-reload:v0.3.0 -kubesphere/notification-manager-operator:v1.0.0 -kubesphere/notification-manager:v1.0.0 kubesphere/metrics-server:v0.4.2 +redis:5.0.14-alpine +haproxy:2.0.25-alpine +alpine:3.14 +osixia/openldap:1.3.0 +kubesphere/netshoot:v1.0 +##kubeedge-images +kubeedge/cloudcore:v1.7.2 
+kubesphere/edge-watcher:v0.1.1 +kubesphere/edge-watcher-agent:v0.1.0 +##gatekeeper-images +openpolicyagent/gatekeeper:v3.5.2 +##openpitrix-images +kubesphere/openpitrix-jobs:v3.2.1 +##kubesphere-devops-images +kubesphere/devops-apiserver:v3.2.1 +kubesphere/devops-controller:v3.2.1 +kubesphere/devops-tools:v3.2.1 +kubesphere/ks-jenkins:v3.2.0-2.249.1 +jenkins/jnlp-slave:3.27-1 +kubesphere/builder-base:v3.2.0 +kubesphere/builder-nodejs:v3.2.0 +kubesphere/builder-maven:v3.2.0 +kubesphere/builder-python:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-base:v3.2.0-podman +kubesphere/builder-nodejs:v3.2.0-podman +kubesphere/builder-maven:v3.2.0-podman +kubesphere/builder-python:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/s2ioperator:v3.2.0 +kubesphere/s2irun:v3.2.0 +kubesphere/s2i-binary:v3.2.0 +kubesphere/tomcat85-java11-centos7:v3.2.0 +kubesphere/tomcat85-java11-runtime:v3.2.0 +kubesphere/tomcat85-java8-centos7:v3.2.0 +kubesphere/tomcat85-java8-runtime:v3.2.0 +kubesphere/java-11-centos7:v3.2.0 +kubesphere/java-8-centos7:v3.2.0 +kubesphere/java-8-runtime:v3.2.0 +kubesphere/java-11-runtime:v3.2.0 +kubesphere/nodejs-8-centos7:v3.2.0 +kubesphere/nodejs-6-centos7:v3.2.0 +kubesphere/nodejs-4-centos7:v3.2.0 +kubesphere/python-36-centos7:v3.2.0 +kubesphere/python-35-centos7:v3.2.0 +kubesphere/python-34-centos7:v3.2.0 +kubesphere/python-27-centos7:v3.2.0 +##kubesphere-monitoring-images +jimmidyson/configmap-reload:v0.3.0 +prom/prometheus:v2.26.0 +kubesphere/prometheus-config-reloader:v0.43.2 +kubesphere/prometheus-operator:v0.43.2 kubesphere/kube-rbac-proxy:v0.8.0 kubesphere/kube-state-metrics:v1.9.7 -openebs/provisioner-localpv:2.3.0 +prom/node-exporter:v0.18.1 +kubesphere/k8s-prometheus-adapter-amd64:v0.6.0 +prom/alertmanager:v0.21.0 thanosio/thanos:v0.18.0 grafana/grafana:7.4.3 +kubesphere/kube-rbac-proxy:v0.8.0 +kubesphere/notification-manager-operator:v1.4.0 
+kubesphere/notification-manager:v1.4.0 +kubesphere/notification-tenant-sidecar:v3.2.0 ##kubesphere-logging-images -kubesphere/elasticsearch-oss:6.7.0-1 kubesphere/elasticsearch-curator:v5.7.6 -kubesphere/fluentbit-operator:v0.5.0 -kubesphere/fluentbit-operator:migrator -kubesphere/fluent-bit:v1.6.9 -elastic/filebeat:6.7.0 -kubesphere/kube-auditing-operator:v0.1.2 -kubesphere/kube-auditing-webhook:v0.1.2 -kubesphere/kube-events-exporter:v0.1.0 -kubesphere/kube-events-operator:v0.1.0 -kubesphere/kube-events-ruler:v0.2.0 -kubesphere/log-sidecar-injector:1.1 +kubesphere/elasticsearch-oss:6.7.0-1 +kubesphere/fluentbit-operator:v0.11.0 docker:19.03 +kubesphere/fluent-bit:v1.8.3 +kubesphere/log-sidecar-injector:1.1 +elastic/filebeat:6.7.0 +kubesphere/kube-events-operator:v0.3.0 +kubesphere/kube-events-exporter:v0.3.0 +kubesphere/kube-events-ruler:v0.3.0 +kubesphere/kube-auditing-operator:v0.2.0 +kubesphere/kube-auditing-webhook:v0.2.0 ##istio-images -istio/pilot:1.6.10 -istio/proxyv2:1.6.10 -jaegertracing/jaeger-agent:1.17 -jaegertracing/jaeger-collector:1.17 -jaegertracing/jaeger-es-index-cleaner:1.17 -jaegertracing/jaeger-operator:1.17.1 -jaegertracing/jaeger-query:1.17 -kubesphere/kiali:v1.26.1 -kubesphere/kiali-operator:v1.26.1 -##kubesphere-devops-images -kubesphere/ks-jenkins:2.249.1 -jenkins/jnlp-slave:3.27-1 -kubesphere/s2ioperator:v3.1.0 -kubesphere/s2irun:v2.1.1 -kubesphere/builder-base:v3.1.0 -kubesphere/builder-nodejs:v3.1.0 -kubesphere/builder-maven:v3.1.0 -kubesphere/builder-go:v3.1.0 -kubesphere/s2i-binary:v2.1.0 -kubesphere/tomcat85-java11-centos7:v2.1.0 -kubesphere/tomcat85-java11-runtime:v2.1.0 -kubesphere/tomcat85-java8-centos7:v2.1.0 -kubesphere/tomcat85-java8-runtime:v2.1.0 -kubesphere/java-11-centos7:v2.1.0 -kubesphere/java-8-centos7:v2.1.0 -kubesphere/java-8-runtime:v2.1.0 -kubesphere/java-11-runtime:v2.1.0 -kubesphere/nodejs-8-centos7:v2.1.0 -kubesphere/nodejs-6-centos7:v2.1.0 -kubesphere/nodejs-4-centos7:v2.1.0 
-kubesphere/python-36-centos7:v2.1.0 -kubesphere/python-35-centos7:v2.1.0 -kubesphere/python-34-centos7:v2.1.0 -kubesphere/python-27-centos7:v2.1.0 -##openpitrix-images -kubespheredev/openpitrix-jobs:v3.1.1 -##weave-scope-images -weaveworks/scope:1.13.0 -##kubeedge-images -kubeedge/cloudcore:v1.6.2 -kubesphere/edge-watcher:v0.1.0 -kubesphere/kube-rbac-proxy:v0.5.0 -kubesphere/edge-watcher-agent:v0.1.0 -##example-images-images -kubesphere/examples-bookinfo-productpage-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v2:1.16.2 -kubesphere/examples-bookinfo-reviews-v3:1.16.2 -kubesphere/examples-bookinfo-details-v1:1.16.2 -kubesphere/examples-bookinfo-ratings-v1:1.16.3 +istio/pilot:1.11.1 +istio/proxyv2:1.11.1 +jaegertracing/jaeger-operator:1.27 +jaegertracing/jaeger-agent:1.27 +jaegertracing/jaeger-collector:1.27 +jaegertracing/jaeger-query:1.27 +jaegertracing/jaeger-es-index-cleaner:1.27 +kubesphere/kiali-operator:v1.38.1 +kubesphere/kiali:v1.38 +##example-images busybox:1.31.1 +nginx:1.14-alpine joosthofman/wget:1.0 -kubesphere/netshoot:v1.0 nginxdemos/hello:plain-text wordpress:4.8-apache mirrorgooglecontainers/hpa-example:latest java:openjdk-8-jre-alpine fluent/fluentd:v1.4.2-2.0 perl:latest +kubesphere/examples-bookinfo-productpage-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v2:1.16.2 +kubesphere/examples-bookinfo-details-v1:1.16.2 +kubesphere/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +weaveworks/scope:1.13.0 ``` diff --git a/content/zh/docs/installing-on-linux/cluster-operation/add-edge-nodes.md b/content/zh/docs/installing-on-linux/cluster-operation/add-edge-nodes.md index 4ee883fed..b43df370d 100644 --- a/content/zh/docs/installing-on-linux/cluster-operation/add-edge-nodes.md +++ b/content/zh/docs/installing-on-linux/cluster-operation/add-edge-nodes.md @@ -92,7 +92,7 @@ KubeSphere 利用 [KubeEdge](https://kubeedge.io/zh/) 将原生容器化应用 1. 
使用 `admin` 用户登录控制台,点击左上角的**平台管理**。 -2. 选择**集群管理**,然后导航至**节点管理**下的**边缘节点**。 +2. 选择**集群管理**,然后导航至**节点**下的**边缘节点**。 {{< notice note >}} @@ -100,7 +100,7 @@ KubeSphere 利用 [KubeEdge](https://kubeedge.io/zh/) 将原生容器化应用 {{}} -3. 点击**添加节点**。在出现的对话框中,设置边缘节点的节点名称并输入其内网 IP 地址。点击**验证**以继续。 +3. 点击**添加**。在出现的对话框中,设置边缘节点的节点名称并输入其内网 IP 地址。点击**验证**以继续。 {{< notice note >}} @@ -109,7 +109,7 @@ KubeSphere 利用 [KubeEdge](https://kubeedge.io/zh/) 将原生容器化应用 {{}} -4. 复制**添加命令**下自动创建的命令,并在您的边缘节点上运行该命令。 +4. 复制**边缘节点配置命令**下自动创建的命令,并在您的边缘节点上运行该命令。 {{< notice note >}} @@ -117,19 +117,15 @@ KubeSphere 利用 [KubeEdge](https://kubeedge.io/zh/) 将原生容器化应用 {{}} - ![edge-node1](/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node1.png) - 5. 关闭对话框,刷新页面,您将看到边缘节点显示在列表中。 - ![edge-node-2](/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-2.png) - {{< notice note >}} 添加边缘节点后,如果在**边缘节点**页面查看不到 CPU 和内存资源使用情况,请确保您的集群中已安装 [Metrics Server](../../../pluggable-components/metrics-server/) 0.4.1 或以上版本。 {{}} - -6. 边缘节点加入集群后,部分 Pod 在调度至该边缘节点上后可能会一直处于 `Pending` 状态。由于部分守护进程集(例如,Calico)有强容忍度,在当前版本中 (KubeSphere v3.1.1),您需要手动 Patch Pod 以防止它们调度至该边缘节点。 + +6. 边缘节点加入集群后,部分 Pod 在调度至该边缘节点上后可能会一直处于 `Pending` 状态。由于部分守护进程集(例如,Calico)有强容忍度,在当前版本中 (KubeSphere 3.2.1),您需要手动 Patch Pod 以防止它们调度至该边缘节点。 ```bash #!/bin/bash diff --git a/content/zh/docs/installing-on-linux/cluster-operation/add-new-nodes.md b/content/zh/docs/installing-on-linux/cluster-operation/add-new-nodes.md index c4f2e0356..150979b79 100644 --- a/content/zh/docs/installing-on-linux/cluster-operation/add-new-nodes.md +++ b/content/zh/docs/installing-on-linux/cluster-operation/add-new-nodes.md @@ -66,7 +66,7 @@ KubeSphere 使用一段时间之后,由于工作负载不断增加,您可能 ./kk add nodes -f sample.yaml ``` -4. 安装完成后,您将能够在 KubeSphere 的控制台上查看新节点及其信息。在**集群管理**页面,选择左侧菜单**节点管理**下的**集群节点**,或者执行命令 `kubectl get node` 以检查更改。 +4. 
安装完成后,您将能够在 KubeSphere 的控制台上查看新节点及其信息。在**集群管理**页面,选择左侧菜单**节点**下的**集群节点**,或者执行命令 `kubectl get node` 以检查更改。 ```bash $ kubectl get node @@ -121,7 +121,7 @@ KubeSphere 使用一段时间之后,由于工作负载不断增加,您可能 address: 172.16.0.253 port: 6443 kubernetes: - version: v1.17.9 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local proxyMode: ipvs diff --git a/content/zh/docs/installing-on-linux/cluster-operation/remove-nodes.md b/content/zh/docs/installing-on-linux/cluster-operation/remove-nodes.md index 925edc505..b13cc5513 100644 --- a/content/zh/docs/installing-on-linux/cluster-operation/remove-nodes.md +++ b/content/zh/docs/installing-on-linux/cluster-operation/remove-nodes.md @@ -10,7 +10,7 @@ weight: 3620 将节点标记为不可调度可防止调度程序将新的 Pod 放置到该节点上,同时不会影响该节点上的现有 Pod。作为节点重启或者其他维护之前的准备步骤,这十分有用。 -以 `admin` 身份登录控制台,访问**集群管理**页面。若要将节点标记为不可调度,从左侧菜单中选择**节点管理**下的**集群节点**,找到想要从集群中删除的节点,点击**停止调度**。或者,直接执行命令 `kubectl cordon $NODENAME`。有关更多详细信息,请参见 [Kubernetes 节点](https://kubernetes.io/docs/concepts/architecture/nodes/)。 +以 `admin` 身份登录控制台,访问**集群管理**页面。若要将节点标记为不可调度,从左侧菜单中选择**节点**下的**集群节点**,找到想要从集群中删除的节点,点击**停止调度**。或者,直接执行命令 `kubectl cordon $NODENAME`。有关更多详细信息,请参见 [Kubernetes 节点](https://kubernetes.io/docs/concepts/architecture/nodes/)。 ![cordon](/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png) diff --git a/content/zh/docs/installing-on-linux/high-availability-configurations/ha-configuration.md b/content/zh/docs/installing-on-linux/high-availability-configurations/ha-configuration.md index e9c0e4890..f75a42a06 100644 --- a/content/zh/docs/installing-on-linux/high-availability-configurations/ha-configuration.md +++ b/content/zh/docs/installing-on-linux/high-availability-configurations/ha-configuration.md @@ -48,7 +48,7 @@ weight: 3150 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` 
{{}} @@ -64,7 +64,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -79,7 +79,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -89,15 +89,15 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - chmod +x kk ``` -创建包含默认配置的示例配置文件。这里使用 Kubernetes v1.20.4 作为示例。 +创建包含默认配置的示例配置文件。这里使用 Kubernetes v1.21.5 作为示例。 ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 @@ -146,12 +146,14 @@ spec: ### 配置负载均衡器 ```yaml -## Public LB config example -## apiserver_loadbalancer_domain_name: "lb.kubesphere.local" +spec: controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + #internalLoadbalancer: haproxy + domain: lb.kubesphere.local address: "192.168.0.xx" - port: "6443" + port: 6443 ``` {{< notice note >}} @@ -159,6 +161,7 @@ spec: - `config-sample.yaml` 文件中的 `address` 和 `port` 应缩进两个空格。 - 大多数情况下,您需要在负载均衡器的 `address` 字段中提供**私有 IP 地址**。但是,不同的云厂商可能对负载均衡器有不同的配置。例如,如果您在阿里云上配置服务器负载均衡器 (SLB),平台会为 SLB 分配一个公共 IP 地址,所以您需要在 `address` 字段中指定公共 IP 地址。 - 负载均衡器默认的内部访问域名是 `lb.kubesphere.local`。 +- 
若要使用内置负载均衡器,请将 `internalLoadbalancer` 字段取消注释。 {{}} diff --git a/content/zh/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md b/content/zh/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md new file mode 100644 index 000000000..c0376da71 --- /dev/null +++ b/content/zh/docs/installing-on-linux/high-availability-configurations/internal-ha-configuration.md @@ -0,0 +1,194 @@ +--- +title: "使用 KubeKey 内置 HAproxy 创建高可用集群" +keywords: 'KubeSphere, Kubernetes, KubeKey, 高可用, 安装' +description: '如何使用 KubeKey 内置的 HAproxy 安装一个高可用的 KubeSphere 与 Kubernetes 集群。' +linkTitle: "使用 KubeKey 内置 HAproxy 创建高可用集群" +weight: 3150 +--- + +[KubeKey](https://github.com/kubesphere/kubekey) 作为一种集群安装工具,从版本 v1.2.1 开始,提供了内置高可用模式,支持一键部署高可用集群环境。KubeKey 的高可用模式实现方式称作本地负载均衡模式。具体表现为 KubeKey 会在每一个工作节点上部署一个负载均衡器(HAproxy),所有主节点的 Kubernetes 组件连接其本地的 kube-apiserver ,而所有工作节点的 Kubernetes 组件通过由 KubeKey 部署的负载均衡器反向代理到多个主节点的 kube-apiserver 。这种模式相较于专用到负载均衡器来说效率有所降低,因为会引入额外的健康检查机制,但是如果当前环境无法提供外部负载均衡器或者虚拟 IP(VIP)时这将是一种更实用、更有效、更方便的高可用部署模式。 + +本教程演示了在 Linux 上安装 KubeSphere 时,使用 KubeKey 内置高可用模式部署的大体配置。 + +## 架构 + +在您开始操作前,请确保准备了 6 台 Linux 机器,其中 3 台充当主节点,另外 3 台充当工作节点。下图展示了内置高可用模式的架构图。有关系统和网络要求的更多信息,请参见[多节点安装](../../../installing-on-linux/introduction/multioverview/#步骤1准备-linux-主机)。 + +![高可用架构](/images/docs/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png) + +## 下载 KubeKey + +请按照以下步骤下载 KubeKey。 + +{{< tabs >}} + +{{< tab "如果您能正常访问 GitHub 和 Googleapis" >}} + +从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - +``` + +{{}} + +{{< tab "如果您访问 GitHub 和 Googleapis 受限" >}} + +先执行以下命令以确保您从正确的区域下载 KubeKey。 + +```bash +export KKZONE=cn +``` + +执行以下命令下载 KubeKey: + +```bash +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - +``` + +{{< notice note >}} + +在您下载 KubeKey 后,如果您将其传至新的机器,且访问 Googleapis 
同样受限,在您执行以下步骤之前请务必再次执行 `export KKZONE=cn` 命令。 + +{{}} + +{{}} + +{{}} + +{{< notice note >}} + +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 + +{{}} + +为 `kk` 添加可执行权限: + +```bash +chmod +x kk +``` + +创建包含默认配置的示例配置文件。这里使用 Kubernetes v1.21.5 作为示例。 + +```bash +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 +``` + +{{< notice note >}} + +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + +- 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 + +- 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 + +{{}} + +## 部署 KubeSphere 和 Kubernetes + +运行以上命令后,会创建一个配置文件 `config-sample.yaml`。编辑该文件以添加机器信息、配置负载均衡器和其他内容。 + +{{< notice note >}} + +如果您自定义文件名,文件名称可能会不同。 + +{{}} + +### config-sample.yaml 示例 + +```yaml +spec: + hosts: + - {name: master1, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123} + - {name: master2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123} + - {name: master3, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123} + - {name: node1, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123} + - {name: node2, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123} + - {name: node3, address: 192.168.0.7, internalAddress: 192.168.0.7, user: ubuntu, password: Testing123} + roleGroups: + etcd: + - master1 + - master2 + - master3 + master: + - master1 + - master2 + - master3 + worker: + - node1 + - node2 + - node3 +``` + +有关该配置文件中不同字段的更多信息,请参见 [Kubernetes 集群配置](../../../installing-on-linux/introduction/vars/)和[多节点安装](../../../installing-on-linux/introduction/multioverview/#2-编辑配置文件)。 + +### 开启内置高可用模式 + 
+```yaml +spec: + controlPlaneEndpoint: + ##Internal loadbalancer for apiservers + internalLoadbalancer: haproxy + + domain: lb.kubesphere.local + address: "" + port: 6443 +``` + +{{< notice note >}} + +- 开启内置高可用模式,需要将 `internalLoadbalancer` 字段取消注释。 +- `config-sample.yaml` 文件中的 `address` 和 `port` 应缩进两个空格。 +- 负载均衡器默认的内部访问域名是 `lb.kubesphere.local`。 + +{{}} + +### 持久化存储插件配置 + +在生产环境中,您需要准备持久化存储并在 `config-sample.yaml` 中配置存储插件(例如 CSI),以明确您想使用哪一种存储服务。有关更多信息,请参见[持久化存储配置](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 + +### 启用可插拔组件(可选) + +自 v2.1.0 起,KubeSphere 解耦了一些核心功能组件。您可以在安装之前或者之后启用这些可插拔组件。如果您不启用这些组件,KubeSphere 将默认以最小化安装。 + +您可以根据您的需求来启用任意可插拔组件。强烈建议您安装这些可插拔组件,以便体验 KubeSphere 提供的全栈特性和功能。启用前,请确保您的机器有足够的 CPU 和内存。有关详情请参见[启用可插拔组件](../../../pluggable-components/)。 + +### 开始安装 + +配置完成后,您可以执行以下命令来开始安装: + +```bash +./kk create cluster -f config-sample.yaml +``` + +### 验证安装 + +1. 运行以下命令查看安装日志。 + + ```bash + kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f + ``` + +2. 若您看到以下信息,您的高可用集群便已创建成功。 + + ```bash + ##################################################### + ### Welcome to KubeSphere! ### + ##################################################### + + Console: http://192.168.0.3:30880 + Account: admin + Password: P@88w0rd + + NOTES: + 1. After you log into the console, please check the + monitoring status of service components in + the "Cluster Management". If any service is not + ready, please wait patiently until all components + are up and running. + 2. Please change the default password after login. 
+ + ##################################################### + https://kubesphere.io 2020-xx-xx xx:xx:xx + ##################################################### + ``` diff --git a/content/zh/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md b/content/zh/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md index 62f8d0ef4..52fa3ebb2 100644 --- a/content/zh/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md +++ b/content/zh/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy.md @@ -1,6 +1,6 @@ --- -title: "使用 Keepalived 和 HAproxy 创建高可用集群" -keywords: 'KubeSphere, Kubernetes, HA, 高可用, 安装, 配置, Keepalived, HAproxy' +title: "使用 Keepalived 和 HAproxy 创建高可用 Kubernetes 集群" +keywords: 'Kubernetes, KubeSphere, HA, 高可用, 安装, 配置, Keepalived, HAproxy' description: '如何使用 Keepalived 和 HAproxy 配置高可用 Kubernetes 集群。' linkTitle: "使用 Keepalived 和 HAproxy 创建高可用集群" weight: 3220 @@ -40,7 +40,7 @@ weight: 3220 ## 配置负载均衡 -[Keepalived](https://www.keepalived.org/) 提供 VRPP 实现,并允许您配置 Linux 机器使负载均衡,预防单点故障。[HAProxy](http://www.haproxy.org/) 提供可靠、高性能的负载均衡,能与 Keepalived 完美配合。 +[Keepalived](https://www.keepalived.org/) 提供 VRRP 实现,并允许您配置 Linux 机器使负载均衡,预防单点故障。[HAProxy](http://www.haproxy.org/) 提供可靠、高性能的负载均衡,能与 Keepalived 完美配合。 由于 `lb1` 和 `lb2` 上安装了 Keepalived 和 HAproxy,如果其中一个节点故障,虚拟 IP 地址(即浮动 IP 地址)将自动与另一个节点关联,使集群仍然可以正常运行,从而实现高可用。若有需要,也可以此为目的,添加更多安装 Keepalived 和 HAproxy 的节点。 @@ -267,7 +267,7 @@ yum install keepalived haproxy psmisc -y 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -283,7 +283,7 @@ export KKZONE=cn 运行以下命令来下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL 
https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -298,7 +298,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上命令,可以下载 KubeKey 的最新版本 (v1.1.1)。您可以更改命令中的版本号来下载特定的版本。 +通过以上命令,可以下载 KubeKey 的最新版本 (v1.2.1)。您可以更改命令中的版本号来下载特定的版本。 {{}} @@ -308,15 +308,15 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - chmod +x kk ``` -使用默认配置创建一个示例配置文件。此处以 Kubernetes v1.20.4 作为示例。 +使用默认配置创建一个示例配置文件。此处以 Kubernetes v1.21.5 作为示例。 ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您没有在本步骤的命令中添加标志 `--with-kubesphere`,那么除非您使用配置文件中的 `addons` 字段进行安装,或者稍后使用 `./kk create cluster` 时再添加该标志,否则 KubeSphere 将不会被部署。 - 如果您添加标志 `--with-kubesphere` 时未指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 diff --git a/content/zh/docs/installing-on-linux/introduction/air-gapped-installation.md b/content/zh/docs/installing-on-linux/introduction/air-gapped-installation.md index 2645b9c57..ae1b90138 100644 --- a/content/zh/docs/installing-on-linux/introduction/air-gapped-installation.md +++ b/content/zh/docs/installing-on-linux/introduction/air-gapped-installation.md @@ -144,7 +144,7 @@ docker run -d \ ## 步骤 3:下载 KubeKey -与在 Linux 上在线安装 KubeSphere 相似,您需要事先[下载 KubeKey v1.1.1](https://github.com/kubesphere/kubekey/releases)。下载 `tar.gz` 文件,将它传输到充当任务机的本地机器上进行安装。解压文件后,执行以下命令,使 `kk` 可执行。 +与在 Linux 上在线安装 KubeSphere 相似,您需要事先[下载 KubeKey 
v1.2.1](https://github.com/kubesphere/kubekey/releases)。下载 `tar.gz` 文件,将它传输到充当任务机的本地机器上进行安装。解压文件后,执行以下命令,使 `kk` 可执行。 ```bash chmod +x kk @@ -157,19 +157,19 @@ chmod +x kk 1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} - 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../air-gapped-installation/#kubesphere-v310-镜像清单)。 + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../air-gapped-installation/#kubesphere-v321-镜像清单)。 {{}} 2. 下载 `offline-installation-tool.sh`。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. 使 `.sh` 文件可执行。 @@ -199,18 +199,18 @@ chmod +x kk 5. 
下载 Kubernetes 二进制文件。 ```bash - ./offline-installation-tool.sh -b -v v1.17.9 + ./offline-installation-tool.sh -b -v v1.21.5 ``` 如果您无法访问 Google 的对象存储服务,请运行以下命令添加环境变量以变更来源。 ```bash - export KKZONE=cn;./offline-installation-tool.sh -b -v v1.17.9 + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.21.5 ``` {{< notice note >}} - - 您可以根据自己的需求变更下载的 Kubernetes 版本。安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + - 您可以根据自己的需求变更下载的 Kubernetes 版本。安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 运行脚本后,会自动创建一个文件夹 `kubekey`。请注意,您稍后创建集群时,该文件和 `kk` 必须放在同一个目录下。 @@ -257,7 +257,7 @@ chmod +x kk 例如: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -302,7 +302,7 @@ spec: address: "" port: 6443 kubernetes: - version: v1.17.9 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -323,7 +323,7 @@ metadata: name: ks-installer namespace: kubesphere-system labels: - version: v3.1.1 + version: v3.2.1 spec: persistence: storageClass: "" @@ -355,7 +355,7 @@ spec: enabled: false username: "" password: "" - externalElasticsearchUrl: "" + externalElasticsearchHost: "" externalElasticsearchPort: "" console: enableMultiLogin: true @@ -490,166 +490,159 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx {{}} -![登录 kubesphere](/images/docs/zh-cn/installing-on-linux/introduction/air-gapped-installation/kubesphere-login.PNG) - ## 附录 -### KubeSphere v3.1.1 镜像清单 +### KubeSphere 3.2.1 镜像清单 ```txt ##k8s-images -kubesphere/kube-apiserver:v1.20.6 
-kubesphere/kube-scheduler:v1.20.6 -kubesphere/kube-proxy:v1.20.6 -kubesphere/kube-controller-manager:v1.20.6 -kubesphere/kube-apiserver:v1.19.8 -kubesphere/kube-scheduler:v1.19.8 -kubesphere/kube-proxy:v1.19.8 -kubesphere/kube-controller-manager:v1.19.8 +kubesphere/kube-apiserver:v1.22.1 +kubesphere/kube-controller-manager:v1.22.1 +kubesphere/kube-proxy:v1.22.1 +kubesphere/kube-scheduler:v1.22.1 +kubesphere/kube-apiserver:v1.21.5 +kubesphere/kube-controller-manager:v1.21.5 +kubesphere/kube-proxy:v1.21.5 +kubesphere/kube-scheduler:v1.21.5 +kubesphere/kube-apiserver:v1.20.10 +kubesphere/kube-controller-manager:v1.20.10 +kubesphere/kube-proxy:v1.20.10 +kubesphere/kube-scheduler:v1.20.10 kubesphere/kube-apiserver:v1.19.9 -kubesphere/kube-scheduler:v1.19.9 -kubesphere/kube-proxy:v1.19.9 kubesphere/kube-controller-manager:v1.19.9 -kubesphere/kube-apiserver:v1.18.8 -kubesphere/kube-scheduler:v1.18.8 -kubesphere/kube-proxy:v1.18.8 -kubesphere/kube-controller-manager:v1.18.8 -kubesphere/kube-apiserver:v1.17.9 -kubesphere/kube-scheduler:v1.17.9 -kubesphere/kube-proxy:v1.17.9 -kubesphere/kube-controller-manager:v1.17.9 -kubesphere/pause:3.1 -kubesphere/pause:3.2 -kubesphere/etcd:v3.4.13 -calico/cni:v3.16.3 -calico/kube-controllers:v3.16.3 -calico/node:v3.16.3 -calico/pod2daemon-flexvol:v3.16.3 -calico/typha:v3.16.3 +kubesphere/kube-proxy:v1.19.9 +kubesphere/kube-scheduler:v1.19.9 +kubesphere/pause:3.5 +kubesphere/pause:3.4.1 +coredns/coredns:1.8.0 +calico/cni:v3.20.0 +calico/kube-controllers:v3.20.0 +calico/node:v3.20.0 +calico/pod2daemon-flexvol:v3.20.0 +calico/typha:v3.20.0 kubesphere/flannel:v0.12.0 -coredns/coredns:1.6.9 -kubesphere/k8s-dns-node-cache:1.15.12 openebs/provisioner-localpv:2.10.1 openebs/linux-utils:2.10.0 -kubesphere/nfs-client-provisioner:v3.1.0-k8s1.11 -##csi-images -csiplugin/csi-neonsan:v1.2.0 -csiplugin/csi-neonsan-ubuntu:v1.2.0 -csiplugin/csi-neonsan-centos:v1.2.0 -csiplugin/csi-provisioner:v1.5.0 -csiplugin/csi-attacher:v2.1.1 
-csiplugin/csi-resizer:v0.4.0 -csiplugin/csi-snapshotter:v2.0.1 -csiplugin/csi-node-driver-registrar:v1.2.0 -csiplugin/csi-qingcloud:v1.2.0 +kubesphere/k8s-dns-node-cache:1.15.12 ##kubesphere-images -kubesphere/ks-apiserver:v3.1.1 -kubesphere/ks-console:v3.1.1 -kubesphere/ks-controller-manager:v3.1.1 -kubesphere/ks-installer:v3.1.1 +kubesphere/ks-installer:v3.2.1 +kubesphere/ks-apiserver:v3.2.1 +kubesphere/ks-console:v3.2.1 +kubesphere/ks-controller-manager:v3.2.1 kubesphere/kubectl:v1.20.0 -kubesphere/kubectl:v1.19.0 -redis:5.0.12-alpine -alpine:3.14 -haproxy:2.0.22-alpine -nginx:1.14-alpine +kubesphere/kubefed:v0.8.1 +kubesphere/tower:v0.2.0 minio/minio:RELEASE.2019-08-07T01-59-21Z minio/mc:RELEASE.2019-08-07T23-14-43Z +csiplugin/snapshot-controller:v4.0.0 +kubesphere/nginx-ingress-controller:v0.48.1 mirrorgooglecontainers/defaultbackend-amd64:1.4 -kubesphere/nginx-ingress-controller:v0.35.0 -osixia/openldap:1.3.0 -csiplugin/snapshot-controller:v3.0.3 -kubesphere/kubefed:v0.7.0 -kubesphere/tower:v0.2.0 -kubesphere/prometheus-config-reloader:v0.42.1 -kubesphere/prometheus-operator:v0.42.1 -prom/alertmanager:v0.21.0 -prom/prometheus:v2.26.0 -prom/node-exporter:v0.18.1 -kubesphere/ks-alerting-migration:v3.1.0 -jimmidyson/configmap-reload:v0.3.0 -kubesphere/notification-manager-operator:v1.0.0 -kubesphere/notification-manager:v1.0.0 kubesphere/metrics-server:v0.4.2 +redis:5.0.14-alpine +haproxy:2.0.25-alpine +alpine:3.14 +osixia/openldap:1.3.0 +kubesphere/netshoot:v1.0 +##kubeedge-images +kubeedge/cloudcore:v1.7.2 +kubesphere/edge-watcher:v0.1.1 +kubesphere/edge-watcher-agent:v0.1.0 +##gatekeeper-images +openpolicyagent/gatekeeper:v3.5.2 +##openpitrix-images +kubesphere/openpitrix-jobs:v3.2.1 +##kubesphere-devops-images +kubesphere/devops-apiserver:v3.2.1 +kubesphere/devops-controller:v3.2.1 +kubesphere/devops-tools:v3.2.1 +kubesphere/ks-jenkins:v3.2.0-2.249.1 +jenkins/jnlp-slave:3.27-1 +kubesphere/builder-base:v3.2.0 +kubesphere/builder-nodejs:v3.2.0 
+kubesphere/builder-maven:v3.2.0 +kubesphere/builder-python:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-go:v3.2.0 +kubesphere/builder-base:v3.2.0-podman +kubesphere/builder-nodejs:v3.2.0-podman +kubesphere/builder-maven:v3.2.0-podman +kubesphere/builder-python:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/builder-go:v3.2.0-podman +kubesphere/s2ioperator:v3.2.0 +kubesphere/s2irun:v3.2.0 +kubesphere/s2i-binary:v3.2.0 +kubesphere/tomcat85-java11-centos7:v3.2.0 +kubesphere/tomcat85-java11-runtime:v3.2.0 +kubesphere/tomcat85-java8-centos7:v3.2.0 +kubesphere/tomcat85-java8-runtime:v3.2.0 +kubesphere/java-11-centos7:v3.2.0 +kubesphere/java-8-centos7:v3.2.0 +kubesphere/java-8-runtime:v3.2.0 +kubesphere/java-11-runtime:v3.2.0 +kubesphere/nodejs-8-centos7:v3.2.0 +kubesphere/nodejs-6-centos7:v3.2.0 +kubesphere/nodejs-4-centos7:v3.2.0 +kubesphere/python-36-centos7:v3.2.0 +kubesphere/python-35-centos7:v3.2.0 +kubesphere/python-34-centos7:v3.2.0 +kubesphere/python-27-centos7:v3.2.0 +##kubesphere-monitoring-images +jimmidyson/configmap-reload:v0.3.0 +prom/prometheus:v2.26.0 +kubesphere/prometheus-config-reloader:v0.43.2 +kubesphere/prometheus-operator:v0.43.2 kubesphere/kube-rbac-proxy:v0.8.0 kubesphere/kube-state-metrics:v1.9.7 -openebs/provisioner-localpv:2.3.0 +prom/node-exporter:v0.18.1 +kubesphere/k8s-prometheus-adapter-amd64:v0.6.0 +prom/alertmanager:v0.21.0 thanosio/thanos:v0.18.0 grafana/grafana:7.4.3 +kubesphere/kube-rbac-proxy:v0.8.0 +kubesphere/notification-manager-operator:v1.4.0 +kubesphere/notification-manager:v1.4.0 +kubesphere/notification-tenant-sidecar:v3.2.0 ##kubesphere-logging-images -kubesphere/elasticsearch-oss:6.7.0-1 kubesphere/elasticsearch-curator:v5.7.6 -kubesphere/fluentbit-operator:v0.5.0 -kubesphere/fluentbit-operator:migrator -kubesphere/fluent-bit:v1.6.9 -elastic/filebeat:6.7.0 -kubesphere/kube-auditing-operator:v0.1.2 -kubesphere/kube-auditing-webhook:v0.1.2 -kubesphere/kube-events-exporter:v0.1.0 
-kubesphere/kube-events-operator:v0.1.0 -kubesphere/kube-events-ruler:v0.2.0 -kubesphere/log-sidecar-injector:1.1 +kubesphere/elasticsearch-oss:6.7.0-1 +kubesphere/fluentbit-operator:v0.11.0 docker:19.03 +kubesphere/fluent-bit:v1.8.3 +kubesphere/log-sidecar-injector:1.1 +elastic/filebeat:6.7.0 +kubesphere/kube-events-operator:v0.3.0 +kubesphere/kube-events-exporter:v0.3.0 +kubesphere/kube-events-ruler:v0.3.0 +kubesphere/kube-auditing-operator:v0.2.0 +kubesphere/kube-auditing-webhook:v0.2.0 ##istio-images -istio/pilot:1.6.10 -istio/proxyv2:1.6.10 -jaegertracing/jaeger-agent:1.17 -jaegertracing/jaeger-collector:1.17 -jaegertracing/jaeger-es-index-cleaner:1.17 -jaegertracing/jaeger-operator:1.17.1 -jaegertracing/jaeger-query:1.17 -kubesphere/kiali:v1.26.1 -kubesphere/kiali-operator:v1.26.1 -##kubesphere-devops-images -kubesphere/ks-jenkins:2.249.1 -jenkins/jnlp-slave:3.27-1 -kubesphere/s2ioperator:v3.1.0 -kubesphere/s2irun:v2.1.1 -kubesphere/builder-base:v3.1.0 -kubesphere/builder-nodejs:v3.1.0 -kubesphere/builder-maven:v3.1.0 -kubesphere/builder-go:v3.1.0 -kubesphere/s2i-binary:v2.1.0 -kubesphere/tomcat85-java11-centos7:v2.1.0 -kubesphere/tomcat85-java11-runtime:v2.1.0 -kubesphere/tomcat85-java8-centos7:v2.1.0 -kubesphere/tomcat85-java8-runtime:v2.1.0 -kubesphere/java-11-centos7:v2.1.0 -kubesphere/java-8-centos7:v2.1.0 -kubesphere/java-8-runtime:v2.1.0 -kubesphere/java-11-runtime:v2.1.0 -kubesphere/nodejs-8-centos7:v2.1.0 -kubesphere/nodejs-6-centos7:v2.1.0 -kubesphere/nodejs-4-centos7:v2.1.0 -kubesphere/python-36-centos7:v2.1.0 -kubesphere/python-35-centos7:v2.1.0 -kubesphere/python-34-centos7:v2.1.0 -kubesphere/python-27-centos7:v2.1.0 -##openpitrix-images -kubespheredev/openpitrix-jobs:v3.1.1 -##weave-scope-images -weaveworks/scope:1.13.0 -##kubeedge-images -kubeedge/cloudcore:v1.6.2 -kubesphere/edge-watcher:v0.1.0 -kubesphere/kube-rbac-proxy:v0.5.0 -kubesphere/edge-watcher-agent:v0.1.0 -##example-images-images -kubesphere/examples-bookinfo-productpage-v1:1.16.2 
-kubesphere/examples-bookinfo-reviews-v1:1.16.2 -kubesphere/examples-bookinfo-reviews-v2:1.16.2 -kubesphere/examples-bookinfo-reviews-v3:1.16.2 -kubesphere/examples-bookinfo-details-v1:1.16.2 -kubesphere/examples-bookinfo-ratings-v1:1.16.3 +istio/pilot:1.11.1 +istio/proxyv2:1.11.1 +jaegertracing/jaeger-operator:1.27 +jaegertracing/jaeger-agent:1.27 +jaegertracing/jaeger-collector:1.27 +jaegertracing/jaeger-query:1.27 +jaegertracing/jaeger-es-index-cleaner:1.27 +kubesphere/kiali-operator:v1.38.1 +kubesphere/kiali:v1.38 +##example-images busybox:1.31.1 +nginx:1.14-alpine joosthofman/wget:1.0 -kubesphere/netshoot:v1.0 nginxdemos/hello:plain-text wordpress:4.8-apache mirrorgooglecontainers/hpa-example:latest java:openjdk-8-jre-alpine fluent/fluentd:v1.4.2-2.0 perl:latest +kubesphere/examples-bookinfo-productpage-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v1:1.16.2 +kubesphere/examples-bookinfo-reviews-v2:1.16.2 +kubesphere/examples-bookinfo-details-v1:1.16.2 +kubesphere/examples-bookinfo-ratings-v1:1.16.3 +##weave-scope-images +weaveworks/scope:1.13.0 ``` diff --git a/content/zh/docs/installing-on-linux/introduction/kubekey.md b/content/zh/docs/installing-on-linux/introduction/kubekey.md index 8ebc95442..6d54790ec 100644 --- a/content/zh/docs/installing-on-linux/introduction/kubekey.md +++ b/content/zh/docs/installing-on-linux/introduction/kubekey.md @@ -18,7 +18,7 @@ KubeKey 的几种使用场景: ## KubeKey 如何运作 -下载 KubeKey 之后,您可以使用可执行文件 `kk` 来进行不同的操作。无论您是使用它来创建,扩缩还是升级集群,都必须事先使用 `kk` 准备配置文件。此配置文件包含集群的基本参数,例如主机信息、网络配置(CNI 插件以及 Pod 和 Service CIDR)、仓库镜像、插件(YAML 或 Chart)和可插拔组件选项(如果您安装 KubeSphere)。有关更多信息,请参见[示例配置文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +下载 KubeKey 之后,您可以使用可执行文件 `kk` 来进行不同的操作。无论您是使用它来创建,扩缩还是升级集群,都必须事先使用 `kk` 准备配置文件。此配置文件包含集群的基本参数,例如主机信息、网络配置(CNI 插件以及 Pod 和 Service CIDR)、仓库镜像、插件(YAML 或 Chart)和可插拔组件选项(如果您安装 KubeSphere)。有关更多信息,请参见[示例配置文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 
准备好配置文件后,您需要使用 `./kk` 命令以及不同的标志来进行不同的操作。这之后,KubeKey 会自动安装 Docker,并拉取所有必要的镜像以进行安装。安装完成后,您还可以检查安装日志。 @@ -27,6 +27,7 @@ KubeKey 的几种使用场景: - 以前基于 ansible 的安装程序依赖于许多软件,例如 Python。KubeKey 由 Go 语言开发,可以消除在多种环境中出现的问题,确保成功安装。 - KubeKey 支持多种安装选项,例如 [All-in-One](../../../quick-start/all-in-one-on-linux/)、[多节点安装](../multioverview/)以及[离线安装](../air-gapped-installation/)。 - KubeKey 使用 Kubeadm 在节点上尽可能多地并行安装 Kubernetes 集群,使安装更简便,提高效率。与旧版的安装程序相比,它极大地节省了安装时间。 +- KubeKey 提供[内置高可用模式](../../high-availability-configurations/internal-ha-configuration/),支持一键安装高可用 Kubernetes 集群。 - KubeKey 旨在将集群作为对象来进行安装,即 CaaO。 ## 下载 KubeKey @@ -38,7 +39,7 @@ KubeKey 的几种使用场景: 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -54,7 +55,7 @@ export KKZONE=cn 运行以下命令来下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -69,21 +70,21 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上的命令,可以下载 KubeKey 的最新版本 (v1.1.1)。您可以更改命令中的版本号来下载特定的版本。 +通过以上的命令,可以下载 KubeKey 的最新版本 (v1.2.1)。您可以更改命令中的版本号来下载特定的版本。 {{}} ## 支持矩阵 -若需使用 KubeKey 来安装 Kubernetes 和 KubeSphere v3.1.1,请参见下表以查看所有受支持的 Kubernetes 版本。 +若需使用 KubeKey 来安装 Kubernetes 和 KubeSphere 3.2.1,请参见下表以查看所有受支持的 Kubernetes 版本。 | KubeSphere 版本 | 受支持的 Kubernetes 版本 | | --------------- | ------------------------------------------------------------ | -| v3.1.1 | v1.17.0,v1.17.4,v1.17.5,v1.17.6,v1.17.7,v1.17.8,v1.17.9,v1.18.3,v1.18.5,v1.18.6,v1.18.8,v1.19.0,v1.19.8,v1.19.9,v1.20.4, v1.20.6 | +| v3.2.1 | v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持) | {{< notice note >}} - 您也可以运行 `./kk version --show-supported-k8s`,查看能使用 KubeKey 安装的所有受支持的 Kubernetes 版本。 -- 能使用 KubeKey 安装的 Kubernetes 版本与 KubeSphere v3.0.0 支持的 Kubernetes 版本不同。如需[在现有 Kubernetes 集群上安装 
KubeSphere v3.1.1](../../../installing-on-kubernetes/introduction/overview/),您的 Kubernetes 版本必须为 v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 +- 能使用 KubeKey 安装的 Kubernetes 版本与 KubeSphere v3.0.0 支持的 Kubernetes 版本不同。如需[在现有 Kubernetes 集群上安装 KubeSphere 3.2.1](../../../installing-on-kubernetes/introduction/overview/),您的 Kubernetes 版本必须为 v1.17.x,v1.18.x,v1.19.x 或 v1.20.x。 {{}} \ No newline at end of file diff --git a/content/zh/docs/installing-on-linux/introduction/multioverview.md b/content/zh/docs/installing-on-linux/introduction/multioverview.md index 0c34aa5c5..526bb3ffd 100644 --- a/content/zh/docs/installing-on-linux/introduction/multioverview.md +++ b/content/zh/docs/installing-on-linux/introduction/multioverview.md @@ -102,7 +102,7 @@ KubeKey 可以一同安装 Kubernetes 和 KubeSphere。根据要安装的 Kubern 从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -118,7 +118,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -133,7 +133,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -157,7 +157,7 @@ chmod +x kk {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 
KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 @@ -172,7 +172,7 @@ chmod +x kk ./kk create config [-f ~/myfolder/abc.yaml] ``` -- 您可以指定要安装的 KubeSphere 版本(例如 `--with-kubesphere v3.1.1`)。 +- 您可以指定要安装的 KubeSphere 版本(例如 `--with-kubesphere v3.2.1`)。 ```bash ./kk create config --with-kubesphere [version] @@ -205,7 +205,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 ``` #### 主机 @@ -280,7 +280,7 @@ KubeSphere 会默认安装 [OpenEBS](https://openebs.io/),为开发和测试 {{< notice tip >}} - 您可以编辑配置文件,启用多集群功能。有关更多信息,请参见[多集群管理](../../../multicluster-management/)。 -- 您也可以选择要安装的组件。有关更多信息,请参见[启用可插拔组件](../../../pluggable-components/)。有关完整的 `config-sample.yaml` 文件的示例,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +- 您也可以选择要安装的组件。有关更多信息,请参见[启用可插拔组件](../../../pluggable-components/)。有关完整的 `config-sample.yaml` 文件的示例,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 {{}} @@ -334,8 +334,6 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx {{}} -![登录](/images/docs/zh-cn/installing-on-linux/introduction/multi-node-installation/login.PNG) - ## 启用 kubectl 自动补全 KubeKey 不会启用 kubectl 自动补全功能,请参见以下内容并将其打开: diff --git a/content/zh/docs/installing-on-linux/introduction/vars.md b/content/zh/docs/installing-on-linux/introduction/vars.md index c6394b75f..e2fe8df29 100644 --- a/content/zh/docs/installing-on-linux/introduction/vars.md +++ b/content/zh/docs/installing-on-linux/introduction/vars.md @@ -10,7 +10,7 @@ weight: 3160 ```yaml kubernetes: - version: v1.19.8 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local masqueradeAll: false @@ -45,7 +45,7 @@ weight: 3160 version - Kubernetes 安装版本。如未指定 Kubernetes 版本,{{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v1.1.0 默认安装 Kubernetes v1.19.8。有关更多信息,请参阅{{< contentLink 
"docs/installing-on-linux/introduction/kubekey/#support-matrix" "支持矩阵" >}}。 + Kubernetes 安装版本。如未指定 Kubernetes 版本,{{< contentLink "docs/installing-on-linux/introduction/kubekey" "KubeKey" >}} v1.2.1 默认安装 Kubernetes v1.21.5。有关更多信息,请参阅{{< contentLink "docs/installing-on-linux/introduction/kubekey/#support-matrix" "支持矩阵" >}}。 imageRepo @@ -123,8 +123,8 @@ weight: 3160 {{< notice note >}} - \*默认情况下,KubeKey 不会在配置文件中定义这些参数,您可以手动添加这些参数并自定义其值。 -- `addons` 用于安装云原生扩展 (Addon)(YAML 或 Chart)。有关详细信息,请参阅此[文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/addons.md)。 -- 此页面仅列出 KubeKey 创建的配置文件中的部分参数。有关其他参数的详细信息,请参阅此[示例文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +- `addons` 用于安装云原生扩展 (Addon)(YAML 或 Chart)。有关详细信息,请参阅此[文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/addons.md)。 +- 此页面仅列出 KubeKey 创建的配置文件中的部分参数。有关其他参数的详细信息,请参阅此[示例文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 {{}} diff --git a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md index 0f85e50d8..6fa408b6b 100644 --- a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md +++ b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-and-k3s.md @@ -32,7 +32,7 @@ weight: 3530 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接运行以下命令: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -48,7 +48,7 @@ export KKZONE=cn 运行以下命令来下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -63,7 +63,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上的命令可以下载 KubeKey 的最新版本 (v1.1.0)。请注意,更早版本的 KubeKey 无法下载 K3s。 
+通过以上的命令可以下载 KubeKey 的最新版本 (v1.2.1)。请注意,更早版本的 KubeKey 无法下载 K3s。 {{}} @@ -78,12 +78,12 @@ chmod +x kk 1. 执行以下命令为集群创建一个配置文件: ```bash - ./kk create config --with-kubernetes v1.20.4-k3s --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.4-k3s --with-kubesphere v3.2.1 ``` {{< notice note >}} - - KubeKey v1.1.0 仅支持安装 K3s v1.20.4。 + - KubeKey v1.2.1 支持安装 K3s v1.21.4。 - 您可以在以上命令中使用 `-f` 或 `--file` 参数指定配置文件的路径和名称。如未指定路径和名称,KubeKey 将默认在当前目录下创建 `config-sample.yaml` 配置文件。 @@ -117,7 +117,7 @@ chmod +x kk address: "" port: 6443 kubernetes: - version: v1.20.4-k3s + version: v1.21.4-k3s imageRepo: kubesphere clusterName: cluster.local network: @@ -133,7 +133,7 @@ chmod +x kk {{< notice note >}} - 有关配置文件中每个字段的更多信息,请参阅[示例文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 + 有关配置文件中每个字段的更多信息,请参阅[示例文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 {{}} @@ -176,12 +176,8 @@ chmod +x kk 5. 从安装日志的 `Console`、`Account` 和 `Password` 参数分别获取 KubeSphere Web 控制台的地址、系统管理员用户名和系统管理员密码,并使用 Web 浏览器登录 KubeSphere Web 控制台。 - ![cluster-management](/images/docs/zh-cn/installing-on-linux/on-premises/cluster-management.png) - - ![service-components](/images/docs/zh-cn/installing-on-linux/on-premises/service-components.png) - {{< notice note >}} - + 您可以在安装后启用 KubeSphere 的可插拔组件,但由于在 KubeSphere 上部署 K3s 目前处于测试阶段,某些功能可能不兼容。 {{}} diff --git a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md index 8fabef8de..c8ebee21f 100644 --- a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md +++ b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-bare-metal.md @@ -200,7 +200,7 @@ yum install conntrack-tools 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或使用以下命令: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - 
+curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -216,7 +216,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -231,7 +231,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -245,15 +245,15 @@ chmod +x kk 您可用使用 KubeKey 同时安装 Kubernetes 和 KubeSphere,通过自定义配置文件中的参数创建多节点集群。 -创建安装有 KubeSphere 的 Kubernetes 集群(例如使用 `--with-kubesphere v3.1.1`): +创建安装有 KubeSphere 的 Kubernetes 集群(例如使用 `--with-kubesphere v3.2.1`): ```bash -./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装 KubeSphere,或者在您后续使用 `./kk create cluster` 命令时再次添加该标志。 - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 @@ -287,7 +287,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 ``` 执行以下命令使用自定义的配置文件创建集群: @@ -395,4 +395,4 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx ```bash reboot - ``` \ No newline at end of file + ``` diff --git a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md 
b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md index 50f237aa9..79e2fbd62 100644 --- a/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md +++ b/content/zh/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md @@ -288,7 +288,7 @@ systemctl status -l keepalived 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -304,7 +304,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -319,7 +319,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -338,12 +338,12 @@ chmod +x kk 创建配置文件(一个示例配置文件)。 ```bash -./kk create config --with-kubernetes v1.19.8 --with-kubesphere v3.1.1 +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 @@ -387,9 +387,9 @@ spec: domain: lb.kubesphere.local # vip address: "10.10.71.67" - port: "6443" + port: 6443 kubernetes: - version: v1.19.8 + version: v1.21.5 imageRepo: 
kubesphere clusterName: cluster.local masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false] @@ -463,10 +463,6 @@ https://kubesphere.io 2020-08-15 23:32:12 使用上述日志中给定的访问地址进行访问,进入到 KubeSphere 的登录界面并使用默认帐户(用户名`admin`,密码`P@88w0rd`)即可登录平台。 -![登录](/images/docs/vsphere/login.png) - -![默认界面](/images/docs/vsphere/default.png) - ## 开启可插拔功能组件(可选) 上面的示例演示了默认的最小安装过程,对于可插拔组件,可以在安装之前或之后启用它们。有关详细信息,请参见[启用可插拔组件](../../../pluggable-components/)。 diff --git a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md index b7ee3596f..11c0412ff 100644 --- a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md +++ b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-glusterfs.md @@ -119,7 +119,7 @@ weight: 3340 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -135,7 +135,7 @@ export KKZONE=cn 运行以下命令来下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -150,7 +150,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上的命令,可以下载 KubeKey 的最新版本 (v1.1.1)。您可以更改命令中的版本号来下载特定的版本。 +通过以上的命令,可以下载 KubeKey 的最新版本 (v1.2.1)。您可以更改命令中的版本号来下载特定的版本。 {{}} @@ -165,12 +165,12 @@ chmod +x kk 1. 
指定想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + - 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 @@ -203,9 +203,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -286,10 +286,8 @@ glusterfs (default) kubernetes.io/glusterfs Delete Immediate 1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 -3. 访问**存储管理**下的**存储卷**,可以看到 PVC 正在使用。 +3. 访问**存储**下的**存储卷**,可以看到 PVC 正在使用。 - ![volumes-in-use](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png) - {{< notice note >}} 有关如何在 KubeSphere 控制台上创建存储卷的更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。 @@ -298,5 +296,3 @@ glusterfs (default) kubernetes.io/glusterfs Delete Immediate 3. 
在**存储类型**页面,可以看到集群中可用的存储类型。 - ![storage-class-available](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png) - diff --git a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md index 7abba6bb5..4c45a9bd8 100644 --- a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md +++ b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-nfs-client.md @@ -71,7 +71,7 @@ weight: 3330 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -87,7 +87,7 @@ export KKZONE=cn 运行以下命令来下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -102,7 +102,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上命令,可以下载 KubeKey 的最新版本 (v1.1.1)。您可以更改命令中的版本号来下载特定的版本。 +通过以上命令,可以下载 KubeKey 的最新版本 (v1.2.1)。您可以更改命令中的版本号来下载特定的版本。 {{}} @@ -117,12 +117,12 @@ chmod +x kk 1. 
指定您想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + - 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 @@ -155,9 +155,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -259,14 +259,10 @@ chmod +x kk 1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 访问**应用负载**中的**容器组**,从下拉菜单中选择 `kube-system`,可以看到 `nfs-client` 的 Pod 正常运行。 +2. 选择**应用负载** > **容器组**,从下拉菜单中选择 `kube-system`,可以看到 `nfs-client` 的 Pod 正常运行。 - ![nfs-pod](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png) +3. 选择**存储** > **存储类型**,可以看到集群中可用的存储类型。 -3. 
访问**存储管理**下的**存储类型**,可以看到集群中可用的存储类型。 - - ![nfs-storage-class](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png) - {{< notice note >}} 有关如何在 KubeSphere 控制台上创建存储卷的更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。 diff --git a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md index 516df2a42..ef639e726 100644 --- a/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md +++ b/content/zh/docs/installing-on-linux/persistent-storage-configurations/install-qingcloud-csi.md @@ -73,7 +73,7 @@ weight: 3320 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或者直接运行以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -89,7 +89,7 @@ export KKZONE=cn 运行以下命令下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -104,7 +104,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -通过以上的命令,可以下载 KubeKey 的最新版本 (v1.1.1)。您可以更改命令中的版本号来下载特定的版本。 +通过以上的命令,可以下载 KubeKey 的最新版本 (v1.2.1)。您可以更改命令中的版本号来下载特定的版本。 {{}} @@ -119,12 +119,12 @@ chmod +x kk 1. 
指定您想要安装的 Kubernetes 版本和 KubeSphere 版本,例如: ```bash - ./kk create config --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 + ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} - - 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + - 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在此步骤的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 - 如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 @@ -157,9 +157,9 @@ chmod +x kk controlPlaneEndpoint: domain: lb.kubesphere.local address: "" - port: "6443" + port: 6443 kubernetes: - version: v1.20.4 + version: v1.21.5 imageRepo: kubesphere clusterName: cluster.local network: @@ -263,14 +263,10 @@ chmod +x kk 1. 使用默认帐户和密码 (`admin/P@88w0rd`) 通过 `:30880` 登录 Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 访问**工作负载**中的**容器组**,从下拉菜单中选择 `kube-system`。可以看到 `csi-qingcloud` 的 Pod 正常运行。 +2. 选择**应用负载** > **容器组**,从下拉菜单中选择 `kube-system`。可以看到 `csi-qingcloud` 的 Pod 正常运行。 - ![qingcloud-csi-pod](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png) +3. 选择**存储**下的**存储类型**,可以看到集群中可用的存储类型。 -3. 
访问**存储管理**下的**存储类型**,可以看到集群中可用的存储类型。 - - ![qingcloud-csi-storage-class](/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png) - {{< notice note >}} 有关如何在 KubeSphere 控制台创建存储卷的更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。 diff --git a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md index 2e0cc9619..de9ebee20 100644 --- a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md +++ b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs.md @@ -60,7 +60,7 @@ Weight: 3240 controlPlaneEndpoint: domain: lb.kubesphere.local address: "39.104.82.170" - port: "6443" + port: 6443 ``` ### 配置SLB 主机实例 @@ -91,7 +91,7 @@ controlPlaneEndpoint: 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -107,7 +107,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -122,7 +122,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -141,7 +141,7 @@ chmod +x kk 在当前位置创建配置文件 `config-sample.yaml`: ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.17.9 -f config-sample.yaml +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 -f config-sample.yaml ``` > 提示:默认是 Kubernetes 1.17.9,这些 Kubernetes 版本也与 KubeSphere 同时进行过充分的测试: v1.15.12, v1.16.13, v1.17.9 (default), v1.18.6,您可以根据需要指定版本。 @@ -186,7 +186,7 @@ metadata: controlPlaneEndpoint: domain: 
lb.kubesphere.local address: "39.104.82.170" - port: "6443" + port: 6443 kubernetes: version: v1.17.9 imageRepo: kubesphere @@ -262,13 +262,9 @@ https://kubesphere.io 2020-08-24 23:30:06 ## 如何自定义开启可插拔组件 -- 点击 `集群管理` - `自定义资源CRD` ,在过滤条件框输入 `ClusterConfiguration` ,如图: +- 点击**集群管理** > **CRD**,在过滤条件框输入 `ClusterConfiguration`。 -![修改KsInstaller](/images/docs/ali-ecs/update_crd.png) - -- 点击 `ClusterConfiguration` 详情,对 `ks-installer` 编辑保存退出即可,组件描述介绍:[文档说明](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) - -![修改KsInstaller](/images/docs/ali-ecs/ks-install-source.png) +- 点击 `ClusterConfiguration` 详情,对 `ks-installer` 编辑保存退出即可,组件描述介绍:[文档说明](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml)。 ## FAQ diff --git a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md index ef762e0af..b670eaa77 100644 --- a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md +++ b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms.md @@ -102,7 +102,7 @@ ssh -i .ssh/id_rsa2 -p50200 kubesphere@40.81.5.xx 从 KubeKey 的 [Github 发布页面](https://github.com/kubesphere/kubekey/releases)下载,或执行以下命令: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -118,7 +118,7 @@ export KKZONE=cn 运行以下命令下载 KubeKey: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -133,7 +133,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -上面的命令会下载 KubeKey 最新版本 (v1.1.1)。您可以在命令中更改版本号以下载特定版本。 +上面的命令会下载 KubeKey 最新版本 (v1.2.1)。您可以在命令中更改版本号以下载特定版本。 {{}} @@ -145,15 +145,15 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - -2. 
使用默认配置创建示例配置文件,这里以 Kubernetes v1.20.4 为例。 +2. 使用默认配置创建示例配置文件,这里以 Kubernetes v1.21.5 为例。 ```bash - ./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 + ./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- KubeSphere v3.1.1 对应 Kubernetes 版本推荐:v1.17.9、v1.18.8、v1.19.8 和 v1.20.4。如果未指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关支持的 Kubernetes 版本请参阅[支持矩阵](../../../installing-on-linux/introduction/kubekey/#support-matrix)。 +- KubeSphere 3.2.1 对应 Kubernetes 版本推荐:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果未指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关支持的 Kubernetes 版本请参阅[支持矩阵](../../../installing-on-linux/introduction/kubekey/#support-matrix)。 - 如果在此步骤中的命令中未添加标志 `--with-kubesphere`,则不会部署 KubeSphere,除非您使用配置文件中的 `addons` 字段进行安装,或稍后使用 `./kk create cluster` 时再次添加此标志。 - 如果在未指定 KubeSphere 版本的情况下添加标志 --with kubesphere`,将安装 KubeSphere 的最新版本。 @@ -186,7 +186,7 @@ spec: - node000002 ``` -有关更多信息,请参阅[文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +有关更多信息,请参阅[文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 ### 配置负载均衡器 @@ -198,7 +198,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "40.81.5.xx" - port: "6443" + port: 6443 ``` {{< notice note >}} diff --git a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md index 8ae38dc33..9877b76e8 100644 --- a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md +++ b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs.md @@ -72,7 +72,7 @@ Kubernetes 服务需要做到高可用,需要保证 kube-apiserver 的 HA , controlPlaneEndpoint: domain: lb.kubesphere.local address: "192.168.1.8" - port: "6443" + port: 6443 ``` ### 获取安装程序可执行文件 @@ -85,7 +85,7 @@ Kubernetes 服务需要做到高可用,需要保证 kube-apiserver 的 HA , 从 [GitHub 
Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -101,7 +101,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -116,7 +116,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -137,7 +137,7 @@ chmod +x kk 在当前位置创建配置文件 `master-HA.yaml`: ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.17.9 -f master-HA.yaml +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 -f master-HA.yaml ``` > 提示:默认是 Kubernetes 1.17.9,这些 Kubernetes 版本也与 KubeSphere 同时进行过充分的测试: v1.15.12, v1.16.13, v1.17.9 (default), v1.18.6,您可以根据需要指定版本。 @@ -169,7 +169,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "192.168.1.8" - port: "6443" + port: 6443 kubernetes: version: v1.17.9 imageRepo: kubesphere @@ -202,7 +202,7 @@ metadata: name: ks-installer namespace: kubesphere-system labels: - version: v3.1.1 + version: v3.2.1 spec: local_registry: "" persistence: @@ -227,10 +227,10 @@ spec: elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log - # externalElasticsearchUrl: + # externalElasticsearchHost: # externalElasticsearchPort: console: - enableMultiLogin: false # enable/disable multiple sing on, it allows an account can be used by different users at the same time. 
+ enableMultiLogin: false # enable/disable multiple sign-on; it allows a user account to be used by different users at the same time. port: 30880 alerting: # Whether to install KubeSphere alerting system. It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from. enabled: true @@ -280,7 +280,7 @@ spec: ```bash # 指定配置文件创建集群 - ./kk create cluster --with-kubesphere v3.1.1 -f master-HA.yaml + ./kk create cluster --with-kubesphere v3.2.1 -f master-HA.yaml # 查看 KubeSphere 安装日志 -- 直到出现控制台的访问地址和登录帐户 kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f @@ -313,7 +313,5 @@ https://kubesphere.io 2020-08-28 01:25:54 ## 如何自定义开启可插拔组件 -点击 `集群管理` - `自定义资源CRD` ,在过滤条件框输入 `ClusterConfiguration` ,如图下 -![5-1-自定义组件](/images/docs/huawei-ecs/huawei-crds-config.png) +点击**集群管理** > **CRD**,在过滤条件框输入 `ClusterConfiguration`。 点击 `ClusterConfiguration` 详情,对 `ks-installer` 编辑保存退出即可,组件描述介绍:[文档说明](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml)。 -![5-2-自定义组件](/images/docs/huawei-ecs/huawei-crds-edit-yaml.png) diff --git a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md index 7669316b9..5a3a08ffd 100644 --- a/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md +++ b/content/zh/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms.md @@ -126,7 +126,7 @@ Weight: 3420 从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令: ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh -
+curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -157,7 +157,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -167,15 +167,15 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - chmod +x kk ``` -创建包含默认配置的示例配置文件。以下以 Kubernetes v1.20.4 为例。 +创建包含默认配置的示例配置文件。以下以 Kubernetes v1.21.5 为例。 ```bash -./kk create config --with-kubesphere v3.1.1 --with-kubernetes v1.20.4 +./kk create config --with-kubesphere v3.2.1 --with-kubernetes v1.21.5 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 或 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,只能使用配置文件中的 `addons` 字段安装,或者在您后续使用 `./kk create cluster` 命令时再次添加这个标志。 @@ -222,7 +222,7 @@ spec: - node3 ``` -有关完整的配置示例说明,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +有关完整的配置示例说明,请参见[此文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 ### 步骤 4:配置负载均衡器 @@ -236,7 +236,7 @@ spec: controlPlaneEndpoint: domain: lb.kubesphere.local address: "192.168.0.253" - port: "6443" + port: 6443 ``` {{< notice note >}} @@ -326,8 +326,6 @@ https://kubesphere.io 2020-08-13 10:50:24 进入 KubeSphere 的 Web 控制台,您也可以看到所有节点运行正常。 -![cluster-node](/images/docs/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png) - 为验证集群的高可用性,可关闭一台主机进行测试。例如,上面的控制台可通过 `IP:30880` 地址访问(此处 IP 地址为绑定到外部负载均衡器的 EIP 
地址)。如果集群的高可用性正常,在您关闭一台主节点后,控制台应该仍能正常工作。 ## 另请参见 diff --git a/content/zh/docs/introduction/features.md b/content/zh/docs/introduction/features.md index 36b79ab34..b53d3f0bf 100644 --- a/content/zh/docs/introduction/features.md +++ b/content/zh/docs/introduction/features.md @@ -9,9 +9,9 @@ weight: 1300 ## 概览 -KubeSphere 作为开源的企业级全栈化容器平台,为用户提供了一个健壮、安全、功能丰富、具备极致体验的 Web 控制台。拥有企业级 Kubernetes 所需的最常见的功能,如工作负载管理,网络策略配置,微服务治理(基于 Istio),DevOps 工程 (CI/CD) ,安全管理,Source to Image/Binary to Image,多租户管理,多维度监控,日志查询和收集,告警通知,审计,应用程序管理和镜像管理、应用配置密钥管理等功能模块。 +KubeSphere 作为开源的企业级全栈化容器平台,为用户提供了一个健壮、安全、功能丰富、具备极致体验的 Web 控制台。拥有企业级 Kubernetes 所需的最常见的功能,如工作负载管理,网络策略配置,微服务治理(基于 Istio),DevOps 项目 (CI/CD) ,安全管理,Source to Image/Binary to Image,多租户管理,多维度监控,日志查询和收集,告警通知,审计,应用程序管理和镜像管理、应用配置密钥管理等功能模块。 -它还支持各种开源存储和网络解决方案以及云存储服务。例如,KubeSphere 为用户提供了功能强大的云原生工具[负载均衡器插件 PorterLB](https://porterlb.io/),这是为 Kubernetes 集群开发的 CNCF 认证的负载均衡插件。 +它还支持各种开源存储和网络解决方案以及云存储服务。例如,KubeSphere 为用户提供了功能强大的云原生工具[负载均衡器插件 OpenELB](https://openelb.github.io/),这是为 Kubernetes 集群开发的 CNCF 认证的负载均衡插件。 有了易于使用的图形化 Web 控制台,KubeSphere 简化了用户的学习曲线并推动了更多的企业使用 Kubernetes 。 @@ -59,7 +59,7 @@ KubeSphere 提供了基于 Jenkins 的可视化 CI/CD 流水线编辑,无需 - **自动化**:CI/CD 流水线和构建策略均基于 Jenkins,可简化和自动化开发、测试和生产过程。缓存依赖项用于加速构建和部署。 - **开箱即用**:用户可以基于他们的 Jenkins 构建策略和客户端插件来创建基于 Git repository/SVN 的 Jenkins 流水线。在内置的 Jenkinsfile 中定义任何步骤和阶段。支持常见的代理类型,例如 Maven,Node.js 和 Go。用户也可以自定义代理类型。 - **可视化**:用户可以轻松地与可视化控制面板进行交互,编辑、管理 CI/CD 流水线。 -- **质量管理**:支持通过静态代码分析扫描来检测DevOps 工程中的 bug、代码错误和安全漏洞。 +- **质量管理**:支持通过静态代码分析扫描来检测DevOps 项目中的 bug、代码错误和安全漏洞。 - **日志**:日志完整记录 CI/CD 流水线运行全过程。 ### Source-to-Image @@ -122,7 +122,7 @@ KubeSphere 通过可视化界面操作监控、运维功能,可简化操作和 - **第三方兼容性**:KubeSphere 与 Prometheus 兼容,后者是用于在 Kubernetes 环境中进行监视的事实指标收集平台。监视数据可以在 KubeSphere 的 Web 控制台中无缝显示。 - **二级精度的多维度监控**: - - 在集群资源维度,系统提供了全面的指标,例如 CPU 利用率、内存利用率、CPU 平均负载、磁盘使用量、inode 使用率、磁盘吞吐量、IOPS、网卡速率、容器组运行状态、ETCD 监控、API Server 监控等多项指标。 + - 在集群资源维度,系统提供了全面的指标,例如 CPU 利用率、内存利用率、CPU 平均负载、磁盘使用量、inode 使用率、磁盘吞吐量、IOPS、网卡速率、容器组运行状态、etcd 
监控、API Server 监控等多项指标。 - 在应用资源维度,提供针对应用的 CPU 用量、内存用量、容器组数量、网络流出速率、网络流入速率等五项监控指标。并支持按用量排序和自定义时间范围查询,快速定位异常提供按节点、企业空间、项目等资源用量排行。 - **排序**:用户可以按节点,工作空间和项目对数据进行排序,从而以直观的方式为他们的资源运行提供图形化视图。 @@ -160,7 +160,7 @@ KubeSphere 通过可视化界面操作监控、运维功能,可简化操作和 - 支持 Calico、Flannel 等开源网络方案。 -- [PorterLB](https://github.com/kubesphere/porter),是由 KubeSphere 开发团队设计、经过 CNCF 认证的一款适用于物理机部署 Kubernetes 的负载均衡插件。 主要特点: +- [OpenELB](https://github.com/kubesphere/openelb),是由 KubeSphere 开发团队设计、经过 CNCF 认证的一款适用于物理机部署 Kubernetes 的负载均衡插件。 主要特点: 1. ECMP 路由负载均衡 2. BGP 动态路由 @@ -170,4 +170,4 @@ KubeSphere 通过可视化界面操作监控、运维功能,可简化操作和 6. 通过 CRD 动态配置BGP服务器 (v0.3.0) 7. 通过 CRD 动态配置BGP对等 (v0.3.0) - 有关 PorterLB 的更多信息,请参见[本文](https://kubesphere.io/conferences/porter/)。 + 有关 OpenELB 的更多信息,请参见[本文](https://kubesphere.io/conferences/porter/)。 diff --git a/content/zh/docs/introduction/scenarios.md b/content/zh/docs/introduction/scenarios.md index 4f926caf6..4be36d6fc 100644 --- a/content/zh/docs/introduction/scenarios.md +++ b/content/zh/docs/introduction/scenarios.md @@ -99,6 +99,6 @@ DevOps 是一套重要的实践和方法,让开发和运维团队能够更高 有时,云端并非资源部署的最优环境。例如,当需要大量计算资源并要求硬盘高 I/O 速度时,使用专门的物理服务器可以实现更佳的性能。此外,对于一些难以迁移上云的特殊工作负载,可能还需要通过经认证的硬件运行,加以复杂的许可与支持协议,在这种情况下,企业更倾向于使用裸机环境部署应用。 -借助新一代轻量级安装器 [KubeKey](https://github.com/kubesphere/kubekey),KubeSphere 帮助企业快速在裸机环境搭建容器化架构,并通过 PorterLB 实现流量的负载均衡。[PorterLB](https://github.com/kubesphere/porter) 由 KubeSphere 社区开源,专为裸机环境下的负载均衡所设计,现已加入 CNCF Landscape,是为 CNCF 所认可的构建云原生最佳实践中的重要一环。 +借助新一代轻量级安装器 [KubeKey](https://github.com/kubesphere/kubekey),KubeSphere 帮助企业快速在裸机环境搭建容器化架构,并通过 OpenELB 实现流量的负载均衡。[OpenELB](https://github.com/kubesphere/openelb) 由 KubeSphere 社区开源,专为裸机环境下的负载均衡所设计,现已加入 CNCF Landscape,是为 CNCF 所认可的构建云原生最佳实践中的重要一环。 有关 KubeSphere 如何推动各行各业的发展并实现数字化转型,请参见[用户案例学习](../../../case/)。 \ No newline at end of file diff --git a/content/zh/docs/introduction/what's-new-in-3.1.0.md b/content/zh/docs/introduction/what's-new-in-3.1.0.md deleted file mode 100644 index de2a26d97..000000000 --- 
a/content/zh/docs/introduction/what's-new-in-3.1.0.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "3.1.0 新特性" -keywords: 'Kubernetes, KubeSphere, 介绍' -description: '3.1.0 新增了对 “边缘计算” 场景的支持。同时在 3.0.0 的基础上新增了计量计费,让基础设施的运营成本更清晰,并进一步优化了在 “多云、多集群、多团队、多租户” 等应用场景下的使用体验' -linkTitle: "3.1.0 新特性" -weight: 1400 ---- - -KubeSphere 3.1.0 于 2021 年 4 月底发布,新增了对 “边缘计算” 场景的支持。同时在 3.0.0 的基础上新增了计量计费,让基础设施的运营成本更清晰,并进一步优化了在 “多云、多集群、多团队、多租户” 等应用场景下的使用体验,增强了 “多集群管理、多租户管理、可观测性、DevOps、应用商店、微服务治理” 等特性,更进一步完善交互设计提升了用户体验。 - -关于 3.1.0 新特性的详细解读,可参考博客 [KubeSphere 3.1.0 GA:混合多云走向边缘,让应用无处不在](../../../blogs/kubesphere-3.1.0-ga-announcement/)。 - -关于 3.1.0 的新功能及增强、Bug 修复、重要的技术调整,以及废弃或移除的功能,请参见 3.1.0 的[发行说明](../../release/release-v310/)。 diff --git a/content/zh/docs/introduction/what's-new-in-3.2.x.md b/content/zh/docs/introduction/what's-new-in-3.2.x.md new file mode 100644 index 000000000..b286780ee --- /dev/null +++ b/content/zh/docs/introduction/what's-new-in-3.2.x.md @@ -0,0 +1,15 @@ +--- +title: "3.2.x 重要更新" +keywords: 'Kubernetes, KubeSphere, 介绍' +description: '3.2.x 新增了对 “边缘计算” 场景的支持。同时在 3.0.0 的基础上新增了计量计费,让基础设施的运营成本更清晰,并进一步优化了在 “多云、多集群、多团队、多租户” 等应用场景下的使用体验' +linkTitle: "3.2.x 重要更新" +weight: 1400 +--- + +2021 年 11 月 2 日,KubeSphere 3.2.0 正式发布,带来面向 AI 场景的 GPU 调度与更灵活的网关,新增了对 GPU 资源调度管理与 GPU 使用监控的支持,进一步增强了在云原生 AI 场景的使用体验。同时还增强了 “多集群管理、多租户管理、可观测性、DevOps、应用商店、微服务治理” 等特性,更进一步完善交互设计,并全面提升了用户体验。 + +关于 3.2.0 新特性的详细解读,可参考博客 [KubeSphere 3.2.0 发布:带来面向 AI 场景的 GPU 调度与更灵活的网关](../../../blogs/kubesphere-3.2.0-ga-announcement/)。 + +关于 3.2.0 的新功能及增强、Bug 修复、重要的技术调整,以及废弃或移除的功能,请参见 3.2.0 的[发行说明](../../release/release-v320/)。 + +2021 年 12 月 21 日,KubeSphere 社区发布了 3.2.1,修复了多个功能的已知 Bug,并对多项功能进行优化,带来更好的用户体验,查看 [3.2.1 发行说明](../../release/release-v321/) 了解详情。 \ No newline at end of file diff --git a/content/zh/docs/multicluster-management/enable-multicluster/agent-connection.md b/content/zh/docs/multicluster-management/enable-multicluster/agent-connection.md index aa2171458..905af4e3f 100644 ---
a/content/zh/docs/multicluster-management/enable-multicluster/agent-connection.md +++ b/content/zh/docs/multicluster-management/enable-multicluster/agent-connection.md @@ -6,13 +6,13 @@ linkTitle: "代理连接" weight: 5220 --- -KubeSphere 的组件 [Tower](https://github.com/kubesphere/tower) 用于代理连接。Tower 是一种通过代理在集群间建立网络连接的工具。如果 Host 集群(简称 H 集群)无法直接访问 Member 集群(简称 M 集群),您可以暴露 H 集群的代理服务地址,这样可以让 M 集群通过代理连接到 H 集群。当 M 集群部署在私有环境(例如 IDC)并且 H 集群可以暴露代理服务时,适用此连接方法。当您的集群分布部署在不同的云厂商上时,同样适用代理连接的方法。 +KubeSphere 的组件 [Tower](https://github.com/kubesphere/tower) 用于代理连接。Tower 是一种通过代理在集群间建立网络连接的工具。如果主集群无法直接访问成员集群,您可以暴露主集群的代理服务地址,这样可以让成员集群通过代理连接到主集群。当成员集群部署在私有环境(例如 IDC)并且主集群可以暴露代理服务时,适用此连接方法。当您的集群分布部署在不同的云厂商上时,同样适用代理连接的方法。 -要通过代理连接使用多集群功能,您必须拥有至少两个集群,分别用作 H 集群和 M 集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为 H 集群或 M 集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 +要通过代理连接使用多集群功能,您必须拥有至少两个集群,分别用作主集群和成员集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为主集群或成员集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 -## 准备 Host 集群 +## 准备主集群 -Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集群。 +主集群为您提供中央控制平面,并且您只能指定一个主集群。 {{< tabs >}} @@ -22,7 +22,7 @@ Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集 - 选项 A - 使用 Web 控制台: - 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**自定义资源 CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的 **CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 - 选项 B - 使用 Kubectl: @@ -30,29 +30,60 @@ Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集 kubectl edit cc ks-installer -n kubesphere-system ``` -在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**更新**(如果使用 Web 控制台)使其生效: +在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**确定**(如果使用 Web 控制台)使其生效: ```yaml multicluster: 
clusterRole: host ``` +要设置主集群名称,请在 `ks-installer` 的 YAML 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 建议您在准备主集群的同时设置主集群名称。若您的主集群已在运行并且已经部署过资源,不建议您再去设置主集群名称。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + 您需要**稍等片刻**待该更改生效。 {{}} {{< tab "尚未安装 KubeSphere" >}} -在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个 Host 集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置一个 Host 集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个主集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置一个主集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 ```yaml multicluster: clusterRole: host ``` +要设置主集群名称,请在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + {{< notice note >}} -如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置 Host 集群。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +{{< notice info >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置主集群。 {{}} @@ -60,7 
+91,7 @@ multicluster: {{}} -您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果 Host 集群已准备就绪,您将看到成功的日志返回。 +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果主集群已准备就绪,您将看到成功的日志返回。 ```bash kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f @@ -68,7 +99,7 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app= ## 设置代理服务地址 -安装 Host 集群后,将在 `kubesphere-system` 中创建一个名为 `tower` 的代理服务,其类型为 `LoadBalancer`。 +安装主集群后,将在 `kubesphere-system` 中创建一个名为 `tower` 的代理服务,其类型为 `LoadBalancer`。 {{< tabs >}} @@ -89,7 +120,7 @@ tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP {{< notice note >}} -一般来说,主流公有云厂商会提供 LoadBalancer 解决方案,并且负载均衡器可以自动分配外部 IP。如果您的集群运行在本地环境中,尤其是在**裸机环境**中,可以使用 [PorterLB](https://github.com/kubesphere/porter) 作为负载均衡器解决方案。 +一般来说,主流公有云厂商会提供 LoadBalancer 解决方案,并且负载均衡器可以自动分配外部 IP。如果您的集群运行在本地环境中,尤其是在**裸机环境**中,可以使用 [OpenELB](https://github.com/kubesphere/openelb) 作为负载均衡器解决方案。 {{}} @@ -113,7 +144,7 @@ tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP - 选项 A - 使用 Web 控制台: - 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**自定义资源 CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的 **CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 - 选项 B - 使用 Kubectl: @@ -139,9 +170,9 @@ tower LoadBalancer 10.233.63.191 139.198.110.23 8080:30721/TCP {{}} -## 准备 Member 集群 +## 准备成员集群 -为了通过 **Host 集群**管理 Member 集群,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在 **Host 集群**中执行以下命令来获取它。 +为了通过**主集群**管理成员集群,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在**主集群**中执行以下命令来获取它。 ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -161,7 +192,7 @@ jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" - 选项 A - 使用 Web 控制台: - 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**自定义资源 CRD**,输入关键字 
`ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的 **CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 - 选项 B - 使用 Kubectl: @@ -176,7 +207,7 @@ authentication: jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU ``` -向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**更新**(如果使用 Web 控制台)使其生效: +向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**确定**(如果使用 Web 控制台)使其生效: ```yaml multicluster: @@ -189,7 +220,7 @@ multicluster: {{< tab "尚未安装 KubeSphere" >}} -在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义 Member 集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置 Member 集群,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义成员集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置成员集群,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 ```yaml authentication: @@ -203,7 +234,7 @@ multicluster: {{< notice note >}} -如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置 Member 集群。 +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置成员集群。 {{}} @@ -211,28 +242,20 @@ multicluster: 
{{}} -您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果 Member 集群已准备就绪,您将看到成功的日志返回。 +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果成员集群已准备就绪,您将看到成功的日志返回。 ```bash kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f ``` -## 导入 Member 集群 +## 导入成员集群 1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面点击**添加集群**。 - ![添加集群](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.PNG) - 2. 在**导入集群**页面输入要导入的集群的基本信息。您也可以点击右上角的**编辑模式**以 YAML 格式查看并编辑基本信息。编辑完成后,点击**下一步**。 - ![集群信息](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.PNG) +3. 在**连接方式**,选择**集群连接代理**,然后点击**创建**。主集群为代理部署 (Deployment) 生成的 YAML 配置文件会显示在控制台上。 -3. 在**连接方式**,选择**集群连接代理**,然后点击**创建**。H 集群为代理部署 (Deployment) 生成的 YAML 配置文件会显示在控制台上。 +4. 根据指示在成员集群中创建一个 `agent.yaml` 文件,然后将代理部署复制并粘贴到该文件中。在该节点上执行 `kubectl create -f agent.yaml` 然后等待代理启动并运行。请确保成员集群可以访问代理地址。 - ![代理连接](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png) - -4. 根据指示在 M 集群中创建一个 `agent.yaml` 文件,然后将代理部署复制并粘贴到该文件中。在该节点上执行 `kubectl create -f agent.yaml` 然后等待代理启动并运行。请确保 M 集群可以访问代理地址。 - -5. 待集群代理启动并运行,您会看到 M 集群已经导入 H 集群。 - - ![已导入的集群](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.PNG) +5. 
待集群代理启动并运行,您会看到成员集群已经导入主集群。 diff --git a/content/zh/docs/multicluster-management/enable-multicluster/direct-connection.md b/content/zh/docs/multicluster-management/enable-multicluster/direct-connection.md index a0a4318a4..dd9bf1675 100644 --- a/content/zh/docs/multicluster-management/enable-multicluster/direct-connection.md +++ b/content/zh/docs/multicluster-management/enable-multicluster/direct-connection.md @@ -6,13 +6,13 @@ linkTitle: "直接连接" weight: 5210 --- -如果 Host 集群(简称 H 集群)的任何节点都能访问 Member 集群(简称 M 集群)的 kube-apiserver 地址,您可以采用**直接连接**。当 M 集群的 kube-apiserver 地址可以暴露给外网,或者 H 集群和 M 集群在同一私有网络或子网中时,此方法均适用。 +如果主集群的任何节点都能访问成员集群的 kube-apiserver 地址,您可以采用**直接连接**。当成员集群的 kube-apiserver 地址可以暴露给外网,或者主集群和成员集群在同一私有网络或子网中时,此方法均适用。 -要通过直接连接使用多集群功能,您必须拥有至少两个集群,分别用作 H 集群和 M 集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为 H 集群或 M 集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 +要通过直接连接使用多集群功能,您必须拥有至少两个集群,分别用作主集群和成员集群。您可以在安装 KubeSphere 之前或者之后将一个集群指定为主集群或成员集群。有关安装 KubeSphere 的更多信息,请参考[在 Linux 上安装](../../../installing-on-linux/)和[在 Kubernetes 上安装](../../../installing-on-kubernetes/)。 -## 准备 Host 集群 +## 准备主集群 -Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集群。 +主集群为您提供中央控制平面,并且您只能指定一个主集群。 {{< tabs >}} @@ -22,7 +22,7 @@ Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集 - 选项 A - 使用 Web 控制台: - 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**自定义资源 CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的 **CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 - 选项 B - 使用 Kubectl: @@ -30,29 +30,60 @@ Host 集群为您提供中央控制平面,并且您只能指定一个 Host 集 kubectl edit cc ks-installer -n kubesphere-system ``` -在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**更新**(如果使用 Web 控制台)使其生效: +在 `ks-installer` 的 YAML 文件中,搜寻到 `multicluster`,将 `clusterRole` 的值设置为 `host`,然后点击**确定**(如果使用 Web 控制台)使其生效:
```yaml multicluster: clusterRole: host ``` +要设置主集群名称,请在 `ks-installer` 的 YAML 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + +{{< notice note >}} + +- 建议您在准备主集群的同时设置主集群名称。若您的主集群已在运行并且已经部署过资源,不建议您再去设置主集群名称。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + 您需要**稍等片刻**待该更改生效。 {{}} {{< tab "尚未安装 KubeSphere" >}} -在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个 Host 集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置一个 Host 集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义一个主集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置一个主集群,请在安装 KubeSphere 之前,将 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中对应的 `clusterRole` 的值修改为 `host`。 ```yaml multicluster: clusterRole: host ``` +要设置主集群名称,请在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中的 `multicluster.clusterRole` 下添加 `hostClusterName` 字段: + +```yaml +multicluster: + clusterRole: host + hostClusterName: <主集群名称> +``` + {{< notice note >}} -如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置 Host 集群。 +- 主集群名称只能包含小写字母、数字、连字符(-)或者半角句号(.),必须以小写字母或数字开头和结尾。 + +{{}} + +{{< notice info >}} + +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 
之后设置主集群。 {{}} @@ -60,15 +91,15 @@ multicluster: {{}} -您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果 Host 集群已准备就绪,您将看到成功的日志返回。 +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果主集群已准备就绪,您将看到成功的日志返回。 ```bash kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f ``` -## 准备 Member 集群 +## 准备成员集群 -为了通过 **Host 集群**管理 Member 集群,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在 **Host 集群**中执行以下命令来获取它。 +为了通过**主集群**管理成员集群,您需要使它们之间的 `jwtSecret` 相同。因此,您首先需要在**主集群**中执行以下命令来获取它。 ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -88,7 +119,7 @@ jwtSecret: "gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU" - 选项 A - 使用 Web 控制台: - 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的**自定义资源 CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 + 使用 `admin` 帐户登录控制台,然后进入**集群管理**页面上的 **CRD**,输入关键字 `ClusterConfiguration`,然后转到其详情页面。编辑 `ks-installer` 的 YAML 文件,方法类似于[启用可插拔组件](../../../pluggable-components/)。 - 选项 B - 使用 Kubectl: @@ -103,7 +134,7 @@ authentication: jwtSecret: gfIwilcc0WjNGKJ5DLeksf2JKfcLgTZU ``` -向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**更新**(如果使用 Web 控制台)使其生效: +向下滚动并将 `clusterRole` 的值设置为 `member`,然后点击**确定**(如果使用 Web 控制台)使其生效: ```yaml multicluster: @@ -116,7 +147,9 @@ multicluster: {{< tab "尚未安装 KubeSphere" >}} -在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义 Member 集群。如果您想[在 Linux 上安装 KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。要设置 Member 集群,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 +在 Linux 上或者在现有 Kubernetes 集群上安装 KubeSphere 之前,您可以定义成员集群。如果您想[在 Linux 上安装
KubeSphere](../../../installing-on-linux/introduction/multioverview/#1-创建示例配置文件),需要使用 `config-sample.yaml` 文件。如果您想[在现有 Kubernetes 集群上安装 KubeSphere](../../../installing-on-kubernetes/introduction/overview/#部署-kubesphere),需要使用两个 YAML 文件,其中一个是 `cluster-configuration.yaml`。 + +要设置成员集群,请在安装 KubeSphere 之前,在 `config-sample.yaml` 或 `cluster-configuration.yaml` 文件中输入上方 `jwtSecret` 所对应的值,并将 `clusterRole` 的值修改为 `member`。 ```yaml authentication: @@ -130,7 +163,7 @@ multicluster: {{< notice note >}} -如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置 Member 集群。 +如果您在单节点集群上安装 KubeSphere ([All-in-One](../../../quick-start/all-in-one-on-linux/)),则不需要创建 `config-sample.yaml` 文件。这种情况下,您可以在安装 KubeSphere 之后设置成员集群。 {{}} @@ -138,32 +171,25 @@ multicluster: {{}} -您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果 Member 集群已准备就绪,您将看到成功的日志返回。 +您可以使用 **kubectl** 来获取安装日志以验证状态。运行以下命令,稍等片刻,如果成员集群已准备就绪,您将看到成功的日志返回。 ```bash kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f ``` -## 导入 Member 集群 +## 导入成员集群 1. 以 `admin` 身份登录 KubeSphere 控制台,转到**集群管理**页面点击**添加集群**。 - ![添加集群](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.PNG) - 2. 在**导入集群**页面,输入要导入的集群的基本信息。您也可以点击右上角的**编辑模式**以 YAML 格式查看并编辑基本信息。编辑完成后,点击**下一步**。 - ![集群信息](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.PNG) - -3. 在**连接方式**,选择**直接连接 Kubernetes 集群**,复制 Member 集群的 KubeConfig 内容并粘贴至文本框。您也可以点击右上角的**编辑模式**以 YAML 格式编辑 Member 集群的 KubeConfig。 +3.
在**连接方式**,选择**直接连接 Kubernetes 集群**,复制成员集群的 kubeconfig 内容并粘贴至文本框。您也可以点击右上角的**编辑模式**以 YAML 格式编辑成员集群的 kubeconfig。 {{< notice note >}} -请确保 Host 集群的任何节点都能访问 KubeConfig 中的 `server` 地址。 +请确保主集群的任何节点都能访问 kubeconfig 中的 `server` 地址。 {{}} - - ![kubeconfig](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.PNG) 4. 点击**创建**,然后等待集群初始化完成。 - ![已导入的集群](/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.PNG) diff --git a/content/zh/docs/multicluster-management/enable-multicluster/retrieve-kubeconfig.md b/content/zh/docs/multicluster-management/enable-multicluster/retrieve-kubeconfig.md index 2c8ee9d3b..12fe8612c 100644 --- a/content/zh/docs/multicluster-management/enable-multicluster/retrieve-kubeconfig.md +++ b/content/zh/docs/multicluster-management/enable-multicluster/retrieve-kubeconfig.md @@ -1,20 +1,20 @@ --- -title: "获取 KubeConfig" +title: "获取 Kubeconfig" keywords: 'Kubernetes, KubeSphere, 多集群, 混合云, kubeconfig' -description: '获取通过直接连接导入集群所需的 KubeConfig。' -linkTitle: "获取 KubeConfig" +description: '获取通过直接连接导入集群所需的 kubeconfig。' +linkTitle: "获取 Kubeconfig" weight: 5230 --- -如果您使用[直接连接](../direct-connection/)导入 Member 集群,则需要提供 KubeConfig。 +如果您使用[直接连接](../direct-connection/)导入成员集群,则需要提供 kubeconfig。 ## 准备工作 您有一个 Kubernetes 集群。 -## 获取 KubeConfig +## 获取 Kubeconfig -进入 `$HOME/.kube`,检查目录中的文件,通常该目录下存在一个名为 `config` 的文件。使用以下命令获取 KubeConfig 文件: +进入 `$HOME/.kube`,检查目录中的文件,通常该目录下存在一个名为 `config` 的文件。使用以下命令获取 kubeconfig 文件: ```bash cat $HOME/.kube/config diff --git a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md index b789c6f05..376f6bcb9 100644 --- a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md +++ b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack.md @@ -10,14 +10,14 @@
weight: 5310 ## 准备工作 -- 您需要准备已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为 Host 集群。有关如何准备 Host 集群的更多信息,请参考[准备 Host 集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 -- 您需要准备已安装 KubeSphere 的 ACK 集群,用作 Member 集群。 +- 您需要准备已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备已安装 KubeSphere 的 ACK 集群,用作成员集群。 ## 导入 ACK 集群 -### 步骤 1:准备 ACK Member 集群 +### 步骤 1:准备 ACK 成员集群 -1. 为了通过 Host 集群管理 Member 集群,您需要使它们之间的 `jwtSecret` 相同。因此,首先需要在 Host 集群上执行以下命令获取 `jwtSecret`。 +1. 为了通过主集群管理成员集群,您需要使它们之间的 `jwtSecret` 相同。因此,首先需要在主集群上执行以下命令获取 `jwtSecret`。 ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -31,14 +31,10 @@ weight: 5310 2. 以 `admin` 身份登录 ACK 集群的 KubeSphere 控制台。点击左上角的**平台管理**,选择**集群管理**。 -3. 访问**自定义资源 CRD**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 - - ![search-config](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png) +3. 访问 **CRD**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 4. 点击右侧的 ,选择**编辑配置文件**来编辑 `ks-installer`。 - ![click-edit](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png) - 5. 在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值修改为如上所示的相应值,将 `clusterRole` 的值设置为 `member`。点击**更新**保存更改。 ```yaml @@ -57,26 +53,18 @@ weight: 5310 {{}} -### 步骤 2:获取 KubeConfig 文件 +### 步骤 2:获取 kubeconfig 文件 -登录阿里云的控制台。访问**容器服务 - Kubernetes** 下的**集群**,点击您的集群访问其详情页,然后选择**连接信息**选项卡。您可以看到**公网访问**选项卡下的 KubeConfig 文件。复制 KubeConfig 文件的内容。 +登录阿里云的控制台。访问**容器服务 - Kubernetes** 下的**集群**,点击您的集群访问其详情页,然后选择**连接信息**选项卡。您可以看到**公网访问**选项卡下的 kubeconfig 文件。复制 kubeconfig 文件的内容。 ![kubeconfig](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/kubeconfig.png) -### 步骤 3:导入 ACK Member 集群 +### 步骤 3:导入 ACK 成员集群 -1.
以 `admin` 身份登录 Host 集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 - - ![click-add-cluster](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png) +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 2. 按需填写基本信息,然后点击**下一步**。 - ![input-info](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png) - -3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 ACK Member 集群的 KubeConfig,然后点击**创建**。 - - ![select-method](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png) +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 ACK 成员集群的 kubeconfig,然后点击**创建**。 4. 等待集群初始化完成。 - - ![ack-cluster-imported](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png) \ No newline at end of file diff --git a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md index 250b27512..d1a86863b 100644 --- a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md +++ b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-aws-eks.md @@ -10,8 +10,8 @@ weight: 5320 ## 准备工作 -- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将其设置为 Host 集群。有关如何准备 Host 集群的更多信息,请参考[准备 Host 集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 -- 您需要准备一个 EKS 集群,用作 Member 集群。 +- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将其设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备一个 EKS 集群,用作成员集群。 ## 导入 EKS 集群 @@ -19,9 +19,9 @@ weight: 5320 您需要首先在 EKS 集群上部署 KubeSphere。有关如何在 EKS 上部署 KubeSphere 的更多信息,请参考[在 AWS EKS 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/#在-eks-上安装-kubesphere)。 -### 步骤 2:准备 EKS Member 集群 +### 步骤 2:准备 EKS 成员集群 -1.
为了通过 Host 集群管理 Member 集群,您需要使它们之间的 `jwtSecret` 相同。首先,需要在 Host 集群上执行以下命令获取 `jwtSecret`。 +1. 为了通过主集群管理成员集群,您需要使它们之间的 `jwtSecret` 相同。首先,需要在主集群上执行以下命令获取 `jwtSecret`。 ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -35,14 +35,10 @@ weight: 5320 2. 以 `admin` 身份登录 EKS 集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 -3. 访问**自定义资源 CRD**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 - - ![search-config](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png) +3. 访问 **CRD**,在搜索栏输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 4. 点击右侧的 ,选择**编辑配置文件**来编辑 `ks-installer`。 - ![click-edit](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png) - 5. 在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值改为如上所示的相应值,将 `clusterRole` 的值改为 `member`。点击**更新**保存更改。 ```yaml @@ -61,9 +57,9 @@ weight: 5320 {{}} -### 步骤 3:创建新的 KubeConfig 文件 +### 步骤 3:创建新的 kubeconfig 文件 -1. [Amazon EKS](https://docs.aws.amazon.com/zh_cn/eks/index.html) 不像标准的 kubeadm 集群那样提供内置的 KubeConfig 文件。但您可以参考此[文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-kubeconfig.html)创建 KubeConfig 文件。生成的 KubeConfig 文件类似如下: +1. [Amazon EKS](https://docs.aws.amazon.com/zh_cn/eks/index.html) 不像标准的 kubeadm 集群那样提供内置的 kubeconfig 文件。但您可以参考此[文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-kubeconfig.html)创建 kubeconfig 文件。生成的 kubeconfig 文件类似如下: ```yaml apiVersion: v1 @@ -98,9 +94,9 @@ weight: 5320 # value: "" ``` - 但是,自动生成的 KubeConfig 文件要求使用此 KubeConfig 的每台计算机均安装有 `aws` 命令(aws CLI 工具)。 + 但是,自动生成的 kubeconfig 文件要求使用此 kubeconfig 的每台计算机均安装有 `aws` 命令(aws CLI 工具)。 -2.
在本地计算机上运行以下命令,获得由 KubeSphere 创建的 ServiceAccount `kubesphere` 的令牌,该令牌对集群具有集群管理员访问权限,并将用作新的 kubeconfig 令牌。 ```bash TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d) @@ -108,7 +104,7 @@ weight: 5320 kubectl config set-context --current --user=kubesphere ``` -3. 运行以下命令获取新的 KubeConfig 文件: +3. 运行以下命令获取新的 kubeconfig 文件: ```bash cat ~/.kube/config @@ -150,7 +146,7 @@ weight: 5320 token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImlCRHF4SlE5a0JFNDlSM2xKWnY1Vkt5NTJrcDNqRS1Ta25IYkg1akhNRmsifQ.eyJpc3M................9KQtFULW544G-FBwURd6ArjgQ3Ay6NHYWZe3gWCHLmag9gF-hnzxequ7oN0LiJrA-al1qGeQv-8eiOFqX3RPCQgbybmix8qw5U6f-Rwvb47-xA ``` - 您可以运行以下命令检查新的 KubeConfig 是否有权限访问 EKS 集群。 + 您可以运行以下命令检查新的 kubeconfig 是否有权限访问 EKS 集群。 ```shell kubectl get nodes @@ -164,20 +160,12 @@ weight: 5320 ip-10-0-8-148.cn-north-1.compute.internal Ready 78m v1.18.8-eks-7c9bda ``` -### 步骤 4:导入 EKS Member 集群 +### 步骤 4:导入 EKS 成员集群 -1. 以 `admin` 身份登录 Host 集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。在**集群管理**页面,点击**添加集群**。 - - ![click-add-cluster](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png) +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,然后选择**集群管理**。在**集群管理**页面,点击**添加集群**。 2. 按需输入基本信息,然后点击**下一步**。 - ![input-info](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png) - -3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 EKS Member 集群的 KubeConfig,然后点击**创建**。 - - ![eks-kubeconfig](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png) +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 EKS 的新 kubeconfig,然后点击**创建**。 4. 
等待集群初始化完成。 - - ![eks-overview](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png) \ No newline at end of file diff --git a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md index 511ef9630..89884b297 100644 --- a/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md +++ b/content/zh/docs/multicluster-management/import-cloud-hosted-k8s/import-gke.md @@ -10,8 +10,8 @@ weight: 5330 ## 准备工作 -- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为 Host 集群。有关如何准备 Host 集群的更多信息,请参考[准备 Host 集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 -- 您需要准备一个 GKE 集群,用作 Member 集群。 +- 您需要准备一个已安装 KubeSphere 的 Kubernetes 集群,并将该集群设置为主集群。有关如何准备主集群的更多信息,请参考[准备主集群](../../../multicluster-management/enable-multicluster/direct-connection/#准备-host-集群)。 +- 您需要准备一个 GKE 集群,用作成员集群。 ## 导入 GKE 集群 @@ -19,9 +19,9 @@ weight: 5330 您需要首先在 GKE 集群上部署 KubeSphere。有关如何在 GKE 上部署 KubeSphere 的更多信息,请参考[在 Google GKE 上部署 KubeSphere](../../../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/)。 -### 步骤 2:准备 GKE Member 集群 +### 步骤 2:准备 GKE 成员集群 -1. 为了通过 Host 集群管理 Member 集群,您需要使它们之间的 `jwtSecret` 相同。首先,在 Host 集群上执行以下命令获取 `jwtSecret`。 +1. 为了通过主集群管理成员集群,您需要使它们之间的 `jwtSecret` 相同。首先,在主集群上执行以下命令获取 `jwtSecret`。 ```bash kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret @@ -35,14 +35,10 @@ weight: 5330 2. 以 `admin` 身份登录 GKE 的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。 -3. 访问**自定义资源 CRD**,在搜索栏中输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 - - ![search-config](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png) +3. 访问 **CRD**,在搜索栏中输入 `ClusterConfiguration`,然后按下键盘上的**回车键**。点击 **ClusterConfiguration** 访问其详情页。 4.
点击右侧的 ,选择**编辑配置文件**来编辑 `ks-installer`。 - ![click-edit](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png) - 5. 在 `ks-installer` 的 YAML 文件中,将 `jwtSecret` 的值改为如上所示的相应值,将 `clusterRole` 的值改为 `member`。 ```yaml @@ -61,7 +57,7 @@ weight: 5330 {{}} -### 步骤 3:创建新的 KubeConfig 文件 +### 步骤 3:创建新的 kubeconfig 文件 1. 在 GKE Cloud Shell 终端运行以下命令: @@ -71,7 +67,7 @@ weight: 5330 kubectl config set-context --current --user=kubesphere ``` -2. 运行以下命令获取新的 KubeConfig 文件: +2. 运行以下命令获取新的 kubeconfig 文件: ```bash cat ~/.kube/config @@ -109,20 +105,12 @@ weight: 5330 token: eyJhbGciOiJSUzI1NiIsImtpZCI6InNjOFpIb3RrY3U3bGNRSV9NWV8tSlJzUHJ4Y2xnMDZpY3hhc1BoVy0xTGsifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlc3BoZXJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlc3BoZXJlLXRva2VuLXpocmJ3Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6Imt1YmVzcGhlcmUiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyMGFmZGI1Ny01MTBkLTRjZDgtYTAwYS1hNDQzYTViNGM0M2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXNwaGVyZS1zeXN0ZW06a3ViZXNwaGVyZSJ9.ic6LaS5rEQ4tXt_lwp7U_C8rioweP-ZdDjlIZq91GOw9d6s5htqSMQfTeVlwTl2Bv04w3M3_pCkvRzMD0lHg3mkhhhP_4VU0LIo4XeYWKvWRoPR2kymLyskAB2Khg29qIPh5ipsOmGL9VOzD52O2eLtt_c6tn-vUDmI_Zw985zH3DHwUYhppGM8uNovHawr8nwZoem27XtxqyBkqXGDD38WANizyvnPBI845YqfYPY5PINPYc9bQBFfgCovqMZajwwhcvPqS6IpG1Qv8TX2lpuJIK0LLjiKaHoATGvHLHdAZxe_zgAC2cT_9Ars3HIN4vzaSX0f-xP--AcRgKVSY9g ``` -### 步骤 4:导入 GKE Member 集群 +### 步骤 4:导入 GKE 成员集群 -1. 以 `admin` 身份登录 Host 集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 - - ![click-add-cluster](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png) +1. 以 `admin` 身份登录主集群的 KubeSphere Web 控制台。点击左上角的**平台管理**,选择**集群管理**。在**集群管理**页面,点击**添加集群**。 2. 
按需输入基本信息,然后点击**下一步**。 - ![input-info](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png) - -3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 GKE Member 集群的新 KubeConfig,然后点击**创建**。 - - ![select-method](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png) +3. **连接方式**选择**直接连接 Kubernetes 集群**。填写 GKE 的新 kubeconfig,然后点击**创建**。 4. 等待集群初始化完成。 - - ![gke-cluster-imported](/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png) \ No newline at end of file diff --git a/content/zh/docs/multicluster-management/import-on-prem-k8s/_index.md b/content/zh/docs/multicluster-management/import-on-prem-k8s/_index.md deleted file mode 100644 index f3c73e7b7..000000000 --- a/content/zh/docs/multicluster-management/import-on-prem-k8s/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -linkTitle: "导入本地 Kubernetes 集群" -weight: 5400 - -_build: - render: false ---- diff --git a/content/zh/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md b/content/zh/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md deleted file mode 100644 index 2870c6b51..000000000 --- a/content/zh/docs/multicluster-management/import-on-prem-k8s/import-kubeadm-k8s.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Import Kubeadm Kubernetes Cluster" -keywords: 'kubernetes, kubesphere, multicluster, kubeadm' -description: '了解如何导入通过 kubeadm 创建的 Kubernetes 集群。' - - -weight: 5410 ---- - -TBD diff --git a/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md b/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md index cf30d2947..a5ae68358 100644 --- a/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md +++ b/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md @@ -1,7 +1,7 @@ --- title: "KubeSphere 联邦" keywords: 'Kubernetes, KubeSphere, 联邦, 多集群, 混合云' -description: '了解 KubeSphere 
中的 Kubernetes 联邦的基本概念,包括 M 集群和 H 集群。' +description: '了解 KubeSphere 中的 Kubernetes 联邦的基本概念,包括成员集群和主集群。' linkTitle: "KubeSphere 联邦" weight: 5120 --- @@ -10,11 +10,11 @@ weight: 5120 ## 多集群架构如何运作 -在使用 KubeSphere 的中央控制平面管理多个集群之前,您需要创建一个 Host 集群(以下称为 **H** 集群)。H 集群实际上是一个启用了多集群功能的 KubeSphere 集群,您可以使用它提供的控制平面统一管理 Member 集群(以下称为 **M** 集群)。M 集群是没有中央控制平面的普通 KubeSphere 集群。也就是说,拥有必要权限的租户(通常是集群管理员)能够通过 H 集群访问控制平面,管理所有 M 集群,例如查看和编辑 M 集群上面的资源。反过来,如果您单独访问任意 M 集群的 Web 控制台,您将无法查看其他集群的任何资源。 +在使用 KubeSphere 的中央控制平面管理多个集群之前,您需要创建一个主集群。主集群实际上是一个启用了多集群功能的 KubeSphere 集群,您可以使用它提供的控制平面统一管理成员集群。成员集群是没有中央控制平面的普通 KubeSphere 集群。也就是说,拥有必要权限的租户(通常是集群管理员)能够通过主集群访问控制平面,管理所有成员集群,例如查看和编辑成员集群上面的资源。反过来,如果您单独访问任意成员集群的 Web 控制台,您将无法查看其他集群的任何资源。 -![中央控制平面](/images/docs/zh-cn/multicluster-management/introduction/kubesphere-federation/central-control-plane.png) +只能有一个主集群存在,而多个成员集群可以同时存在。在多集群架构中,主集群和成员集群之间的网络可以[直接连接](../../enable-multicluster/direct-connection/),或者通过[代理连接](../../enable-multicluster/agent-connection/)。成员集群之间的网络可以设置在完全隔离的环境中。 -只能有一个 H 集群存在,而多个 M 集群可以同时存在。在多集群架构中,H 集群和 M 集群之间的网络可以直接连接,或者通过代理连接。M 集群之间的网络可以设置在完全隔离的环境中。 +如果您使用的是通过 kubeadm 搭建的自建 Kubernetes 集群,请参阅[离线安装](../../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/)在您的 Kubernetes 集群上安装 KubeSphere,然后通过直接连接或者代理连接来启用 KubeSphere 多集群管理功能。 ![Kubernetes 联邦](/images/docs/zh-cn/multicluster-management/introduction/kubesphere-federation/kubesphere-federation.png) @@ -38,12 +38,12 @@ KubeSphere 拥有功能强大的中央控制平面,您可以统一纳管部署 {{< notice note >}} - CPU 和内存的资源请求和限制均指单个副本的要求。 -- 多集群功能启用后,H 集群上会安装 Tower 和 controller-manager。如果您使用[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/),M 集群仅需要 Tower。如果您使用[直接连接](../../../multicluster-management/enable-multicluster/direct-connection/),M 集群无需额外组件。 +- 多集群功能启用后,主集群上会安装 Tower 和 controller-manager。如果您使用[代理连接](../../../multicluster-management/enable-multicluster/agent-connection/),成员集群仅需要 
Tower。如果您使用[直接连接](../../../multicluster-management/enable-multicluster/direct-connection/),成员集群无需额外组件。 {{}} ## 在多集群架构中使用应用商店 -与 KubeSphere 中的其他组件不同,[KubeSphere 应用商店](../../../pluggable-components/app-store/)是所有集群(包括 H 集群和 M 集群)的全局应用程序池。您只需要在 H 集群上启用应用商店,便可以直接在 M 集群上使用应用商店的相关功能(无论 M 集群是否启用应用商店),例如[应用模板](../../../project-user-guide/application/app-template/)和[应用仓库](../../../workspace-administration/app-repository/import-helm-repository/)。 +与 KubeSphere 中的其他组件不同,[KubeSphere 应用商店](../../../pluggable-components/app-store/)是所有集群(包括主集群和成员集群)的全局应用程序池。您只需要在主集群上启用应用商店,便可以直接在成员集群上使用应用商店的相关功能(无论成员集群是否启用应用商店),例如[应用模板](../../../project-user-guide/application/app-template/)和[应用仓库](../../../workspace-administration/app-repository/import-helm-repository/)。 -但是,如果只在 M 集群上启用应用商店而没有在 H 集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 \ No newline at end of file +但是,如果只在成员集群上启用应用商店而没有在主集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 \ No newline at end of file diff --git a/content/zh/docs/multicluster-management/unbind-cluster.md b/content/zh/docs/multicluster-management/unbind-cluster.md index ab2e632a7..9971e2e00 100644 --- a/content/zh/docs/multicluster-management/unbind-cluster.md +++ b/content/zh/docs/multicluster-management/unbind-cluster.md @@ -11,7 +11,7 @@ weight: 5500 ## 准备工作 - 您已经启用多集群管理。 -- 您需要有一个拥有**集群管理**权限角色的帐户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并授予至一个帐户。 +- 您需要有一个拥有**集群管理**权限角色的用户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个拥有该权限的新角色并授予至一个用户。 ## 解绑集群 @@ -19,12 +19,8 @@ weight: 5500 2. 在**集群管理**页面,请点击要从中央控制平面移除的集群。 - ![集群管理](/images/docs/zh-cn/multicluster-management/unbind-a-cluster/cluster-management.PNG) - 3. 
在**集群设置**下的**基本信息**页面,请选择**我确定要执行解绑集群的操作**,然后点击**解除绑定**。 - ![解绑集群](/images/docs/zh-cn/multicluster-management/unbind-a-cluster/unbind-cluster.PNG) - {{< notice note >}} 解绑集群后,您将无法在中央控制平面管理该集群,但该集群上的 Kubernetes 资源不会被删除。 diff --git a/content/zh/docs/pluggable-components/alerting.md b/content/zh/docs/pluggable-components/alerting.md index fa6f1e139..e8d21f237 100644 --- a/content/zh/docs/pluggable-components/alerting.md +++ b/content/zh/docs/pluggable-components/alerting.md @@ -39,9 +39,9 @@ weight: 6600 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用告警系统。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用告警系统。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件并进行编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件并进行编辑。 ```bash vi cluster-configuration.yaml @@ -57,7 +57,7 @@ weight: 6600 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -66,15 +66,15 @@ weight: 6600 1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 +2. 
点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `alerting`,将 `enabled` 的 `false` 更改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `alerting`,将 `enabled` 的 `false` 更改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml alerting: @@ -94,6 +94,4 @@ weight: 6600 ## 验证组件的安装 -如果您在**集群管理**页面可以看到**告警消息**和**告警策略**,说明安装成功,因为安装组件之后才会显示这两部分。 - -![alerting-section](/images/docs/zh-cn/enable-pluggable-components/kubesphere-alerting/alerting-section.png) \ No newline at end of file +如果您在**集群管理**页面可以看到**告警消息**和**告警策略**,说明安装成功,因为安装组件之后才会显示这两部分。 \ No newline at end of file diff --git a/content/zh/docs/pluggable-components/app-store.md b/content/zh/docs/pluggable-components/app-store.md index d468ba0a6..8b7466653 100644 --- a/content/zh/docs/pluggable-components/app-store.md +++ b/content/zh/docs/pluggable-components/app-store.md @@ -1,16 +1,14 @@ --- title: "KubeSphere 应用商店" -keywords: "Kubernetes, KubeSphere, app-store, OpenPitrix" +keywords: "Kubernetes, KubeSphere, App Store, OpenPitrix" description: "了解如何启用应用商店,一个可以在内部实现数据和应用共享、并制定应用交付流程的行业标准的组件。" linkTitle: "KubeSphere 应用商店" weight: 6200 --- -作为一个开源的、以应用为中心的容器平台,KubeSphere 在 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的基础上,为用户提供了一个基于 Helm 的应用商店,用于应用生命周期管理。OpenPitrix 是一个开源的 Web 平台,用于打包、部署和管理不同类型的应用。KubeSphere 应用商店让 ISV、开发者和用户能够在一站式服务中只需点击几下就可以上传、测试、部署和发布应用。 +作为一个开源的、以应用为中心的容器平台,KubeSphere 在 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的基础上,为用户提供了一个基于 Helm 的应用商店,用于应用生命周期管理。OpenPitrix 是一个开源的 Web 平台,用于打包、部署和管理不同类型的应用。KubeSphere 应用商店让 ISV、开发者和用户能够在一站式服务中只需点击几下就可以上传、测试、安装和发布应用。 -对内,KubeSphere 应用商店可以作为不同团队共享数据、中间件和办公应用的场所。对外,有利于设立构建和交付的行业标准。默认情况下,应用商店中内置了 17 个应用。启用该功能后,您可以通过应用模板添加更多应用。 - 
-![app-store](/images/docs/zh-cn/enable-pluggable-components/kubesphere-app-store/app-store-page.png) +对内,KubeSphere 应用商店可以作为不同团队共享数据、中间件和办公应用的场所。对外,有利于设立构建和交付的行业标准。启用该功能后,您可以通过应用模板添加更多应用。 有关更多信息,请参阅[应用商店](../../application-store/)。 @@ -46,9 +44,9 @@ weight: 6200 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用应用商店(可选服务组件)。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用应用商店(可选系统组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -65,7 +63,7 @@ weight: 6200 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -74,15 +72,15 @@ weight: 6200 1. 以 `admin` 身份登录控制台,点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`,点击结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`,点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 
在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `openpitrix`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `openpitrix`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml openpitrix: @@ -103,19 +101,17 @@ weight: 6200 ## 验证组件的安装 -在您登录控制台后,如果您能看到页面左上角的**应用商店**以及其中的 17 个内置应用,则说明安装成功。 - -![app-store](/images/docs/zh-cn/enable-pluggable-components/kubesphere-app-store/app-store-page.png) +在您登录控制台后,如果您能看到页面左上角的**应用商店**以及其中的应用,则说明安装成功。 {{< notice note >}} -- 您可以在不登录控制台的情况下直接访问 `:30880/apps` 进入应用商店。 -- 与先前版本不同,KubeSphere 3.1 中的应用商店启用后,**OpenPitrix** 选项卡不会显示在**服务组件**页面。 +- 您可以在不登录控制台的情况下直接访问 `<节点 IP 地址>:30880/apps` 进入应用商店。 +- KubeSphere 3.2.x 中的应用商店启用后,**OpenPitrix** 页签不会显示在**系统组件**页面。 {{}} ## 在多集群架构中使用应用商店 -[在多集群架构中](../../multicluster-management/introduction/kubefed-in-kubesphere/),一个 Host 集群(H 集群)管理所有 Member 集群(M 集群)。与 KubeSphere 中的其他组件不同,应用商店是所有集群(包括 H 集群和 M 集群)的全局应用程序池。您只需要在 H 集群上启用应用商店,便可以直接在 M 集群上使用应用商店的相关功能(无论 M 集群是否启用应用商店),例如[应用模板](../../project-user-guide/application/app-template/)和[应用仓库](../../workspace-administration/app-repository/import-helm-repository/)。 +[在多集群架构中](../../multicluster-management/introduction/kubefed-in-kubesphere/),一个主集群管理所有成员集群。与 KubeSphere 中的其他组件不同,应用商店是所有集群(包括主集群和成员集群)的全局应用程序池。您只需要在主集群上启用应用商店,便可以直接在成员集群上使用应用商店的相关功能(无论成员集群是否启用应用商店),例如[应用模板](../../project-user-guide/application/app-template/)和[应用仓库](../../workspace-administration/app-repository/import-helm-repository/)。 -但是,如果只在 M 集群上启用应用商店而没有在 H 集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 +但是,如果只在成员集群上启用应用商店而没有在主集群上启用,您将无法在多集群架构中的任何集群上使用应用商店。 diff --git a/content/zh/docs/pluggable-components/auditing-logs.md b/content/zh/docs/pluggable-components/auditing-logs.md index 90a81eb3c..1f880ad3b 100644 --- a/content/zh/docs/pluggable-components/auditing-logs.md +++ b/content/zh/docs/pluggable-components/auditing-logs.md @@ -34,7 +34,7 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 ``` {{< notice note >}} -默认情况下,如果启用了审计功能,KubeKey 将安装内置 
Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +默认情况下,如果启用了审计功能,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -45,7 +45,7 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -57,9 +57,9 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 ### 在 Kubernetes 上安装 -在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用审计功能(可选组件)。 +在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用审计功能(可选组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -73,7 +73,7 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 ``` {{< notice note >}} -默认情况下,如果启用了审计功能,ks-installer 会安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +默认情况下,如果启用了审计功能,ks-installer 会安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -84,14 +84,14 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -100,15 +100,15 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 +2. 
点击 **CRD**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `auditing`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `auditing`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml auditing: @@ -116,7 +116,7 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 ``` {{< notice note >}} -默认情况下,如果启用了审计功能,将安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +默认情况下,如果启用了审计功能,将安装内置 Elasticsearch。对于生产环境,如果您想启用审计功能,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -127,7 +127,7 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. 
``` @@ -148,15 +148,13 @@ KubeSphere 审计日志系统提供了一套与安全相关并按时间顺序排 {{< tab "在仪表板中验证组件的安装" >}} -验证您可以使用右下角**工具箱**中的**操作审计**功能。 - -![auditing-operating](/images/docs/zh-cn/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png) +验证您可以使用右下角**工具箱**中的**审计日志查询**功能。 {{}} {{< tab "通过 kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n kubesphere-logging-system diff --git a/content/zh/docs/pluggable-components/devops.md b/content/zh/docs/pluggable-components/devops.md index 851d3d0e7..27e78b36e 100644 --- a/content/zh/docs/pluggable-components/devops.md +++ b/content/zh/docs/pluggable-components/devops.md @@ -43,9 +43,9 @@ DevOps 系统为用户提供了一个自动化的环境,应用可以自动发 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用 DevOps(可选服务组件)。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用 DevOps(可选系统组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -61,7 +61,7 @@ DevOps 系统为用户提供了一个自动化的环境,应用可以自动发 3. 
执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -70,15 +70,15 @@ DevOps 系统为用户提供了一个自动化的环境,应用可以自动发 1. 以 `admin` 身份登录控制台,点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`,点击搜索结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `devops`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `devops`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml devops: @@ -102,15 +102,13 @@ DevOps 系统为用户提供了一个自动化的环境,应用可以自动发 {{< tab "在仪表板中验证组件的安装" >}} -进入**服务组件**,检查 **DevOps** 的状态,可以看到如下类似图片: - -![devops](/images/docs/zh-cn/enable-pluggable-components/kubesphere-devops-system/devops.png) +进入**系统组件**,检查 **DevOps** 标签页中的所有组件都处于**健康**状态。 {{}} {{< tab "通过 kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n kubesphere-devops-system @@ -120,7 +118,7 @@ kubectl get pod -n kubesphere-devops-system ```bash NAME READY STATUS RESTARTS AGE -ks-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m +devops-jenkins-5cbbfbb975-hjnll 1/1 Running 0 40m s2ioperator-0 1/1 Running 0 41m ``` diff --git a/content/zh/docs/pluggable-components/events.md b/content/zh/docs/pluggable-components/events.md index 3a4db239b..1545cbbb3 100644 --- a/content/zh/docs/pluggable-components/events.md +++ b/content/zh/docs/pluggable-components/events.md @@ -36,7 +36,7 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 ``` {{< notice note >}} -默认情况下,如果启用了事件系统,KubeKey 将安装内置 
Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +默认情况下,如果启用了事件系统,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -47,7 +47,7 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -59,9 +59,9 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 ### 在 Kubernetes 上安装 -在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用事件系统(可选组件)。 +在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用事件系统(可选组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -75,7 +75,7 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 ``` {{< notice note >}} -对于生产环境,如果您想启用事件系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +对于生产环境,如果您想启用事件系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -86,14 +86,14 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -102,17 +102,17 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 
在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `events`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `events`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml events: @@ -121,7 +121,7 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 {{< notice note >}} -默认情况下,如果启用了事件系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 +默认情况下,如果启用了事件系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用事件系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -132,7 +132,7 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. 
``` @@ -154,15 +154,13 @@ KubeSphere 事件系统使用户能够跟踪集群内部发生的事件,例如 {{< tab "在仪表板中验证组件的安装" >}} -验证您可以使用右下角**工具箱**中的**事件查询**功能。 - -![event-search](/images/docs/zh-cn/enable-pluggable-components/kubesphere-events/event-search.png) +验证您可以使用右下角**工具箱**中的**资源事件查询**功能。 {{}} {{< tab "通过 kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n kubesphere-logging-system diff --git a/content/zh/docs/pluggable-components/kubeedge.md b/content/zh/docs/pluggable-components/kubeedge.md index 8b04185f6..7f3e16b3b 100644 --- a/content/zh/docs/pluggable-components/kubeedge.md +++ b/content/zh/docs/pluggable-components/kubeedge.md @@ -48,9 +48,9 @@ KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中首先启用 KubeEdge。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中首先启用 KubeEdge。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件并进行编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件并进行编辑。 ```bash vi cluster-configuration.yaml @@ -68,7 +68,7 @@ KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上 4. 保存文件并执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -77,13 +77,13 @@ KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上 1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 -2. 
点击**自定义资源 CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 +2. 点击 **CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,然后选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 4. 在该配置文件中,搜寻到 `kubeedge.enabled`,将 `false` 更改为 `true` 以启用 KubeEdge。 @@ -92,13 +92,7 @@ KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上 enabled: true # 将“false”更改为“true”。 ``` -5. 将 `kubeedge.cloudCore.cloudHub.advertiseAddress` 的值设置为集群的公共 IP 地址或边缘节点可以访问的 IP 地址。完成后,点击右下角的**更新**保存配置。 - - {{< notice note >}} - -如果您的集群是从 KubeSphere v3.0.0 升级而来,`cluster-configuration.yaml` 中不会包含 KubeEdge 的配置。有关更多信息,请参见[如何在升级后启用 KubeEdge](#在升级后启用-kubeedge)。 - -{{}} +5. 将 `kubeedge.cloudCore.cloudHub.advertiseAddress` 的值设置为集群的公共 IP 地址或边缘节点可以访问的 IP 地址。完成后,点击右下角的**确定**保存配置。 6. 您可以使用 Web Kubectl 执行以下命令查看安装过程: @@ -111,60 +105,19 @@ KubeEdge 的组件在两个单独的位置运行——云上和边缘节点上 您可以通过点击控制台右下角的 来找到 Web kubectl 工具。 {{}} -## 在升级后启用 KubeEdge - -如果您的 KubeSphere v3.1.0 集群是从 KubeSphere v3.0.0 的集群升级而来,请按照[以上步骤](#在安装后启用-kubeedge)编辑 `cluster-configuration.yaml`(即 CRD `clusterconfiguration`)并手动添加以下配置,再启用 KubeEdge。 - -```yaml - kubeedge: - enabled: false - cloudCore: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] - cloudhubPort: "10000" - cloudhubQuicPort: "10001" - cloudhubHttpsPort: "10002" - cloudstreamPort: "10003" - tunnelPort: "10004" - cloudHub: - advertiseAddress: - - "" - nodeLimit: "100" - service: - cloudhubNodePort: "30000" - cloudhubQuicNodePort: "30001" - cloudhubHttpsNodePort: "30002" - cloudstreamNodePort: "30003" - tunnelNodePort: "30004" - edgeWatcher: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] - edgeWatcherAgent: - nodeSelector: {"node-role.kubernetes.io/worker": ""} - tolerations: [] -``` - -{{< notice warning >}} - -请勿在升级前直接在 
`cluster-configuration.yaml` 中直接添加 KubeEdge 的配置。 - -{{}} - ## 验证组件的安装 {{< tabs >}} {{< tab "在仪表板中验证组件的安装" >}} -在**集群管理**页面,您可以看到**节点管理**下出现**边缘节点**板块。 - -![edge-nodes](/images/docs/zh-cn/enable-pluggable-components/kubeedge/edge-nodes.png) +在**集群管理**页面,您可以看到**节点**下出现**边缘节点**板块。 {{}} {{< tab "通过 Kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n kubeedge diff --git a/content/zh/docs/pluggable-components/logging.md b/content/zh/docs/pluggable-components/logging.md index df199e9ae..7d3ebf095 100644 --- a/content/zh/docs/pluggable-components/logging.md +++ b/content/zh/docs/pluggable-components/logging.md @@ -6,7 +6,7 @@ linkTitle: "KubeSphere 日志系统" weight: 6400 --- -KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的、易于使用的日志系统。它涵盖了不同层级的日志,包括租户、基础设施资源和应用。用户可以从项目、工作负载、Pod 和关键字等不同维度对日志进行搜索。与 Kibana 相比,KubeSphere 基于租户的日志系统中,每个租户只能查看自己的日志,从而可以在租户之间提供更好的隔离性和安全性。除了 KubeSphere 自身的日志系统,该容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。 +KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的、易于使用的日志系统。它涵盖了不同层级的日志,包括租户、基础设施资源和应用。用户可以从项目、工作负载、容器组和关键字等不同维度对日志进行搜索。与 Kibana 相比,KubeSphere 基于租户的日志系统中,每个租户只能查看自己的日志,从而可以在租户之间提供更好的隔离性和安全性。除了 KubeSphere 自身的日志系统,该容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。 有关更多信息,请参见[日志查询](../../toolbox/log-query/)。 @@ -35,10 +35,14 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 ```yaml logging: enabled: true # 将“false”更改为“true”。 + containerruntime: docker ``` - {{< notice note >}} -默认情况下,如果启用了日志系统,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.2.1,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 + + {{}} + + {{< notice note >}}默认情况下,如果启用了日志系统,KubeKey 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `config-sample.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 
`externalElasticsearchPort`。在安装前提供以下信息后,KubeKey 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -49,7 +53,7 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -61,9 +65,9 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用日志系统(可选服务组件)。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程中说明了在 Kubernetes 上安装 KubeSphere 的流程,不过,需要事先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用日志系统(可选系统组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -74,10 +78,14 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 ```yaml logging: enabled: true # 将“false”更改为“true”。 + containerruntime: docker ``` - {{< notice note >}} -默认情况下,如果启用了日志系统,ks-installer 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `.logging.containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.2.1,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 + + {{}} + + {{< notice note >}}默认情况下,如果启用了日志系统,ks-installer 将安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在 `cluster-configuration.yaml` 中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在安装前提供以下信息后,ks-installer 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 {{}} ```yaml @@ -88,14 +96,14 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. externalElasticsearchPort: # The port of external Elasticsearch. ``` 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -104,27 +112,31 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 
点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `logging`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 在该 YAML 文件中,搜寻到 `logging`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml logging: enabled: true # 将“false”更改为“true”。 + containerruntime: docker ``` - {{< notice note >}}默认情况下,如果启用了日志系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{< notice info >}}若使用 containerd 作为容器运行时,请将 `.logging.containerruntime` 字段的值更改为 `containerd`。如果您从低版本升级至 KubeSphere 3.2.1,则启用 KubeSphere 日志系统时必须在 `logging` 字段下手动添加 `containerruntime` 字段。 {{}} + {{< notice note >}}默认情况下,如果启用了日志系统,将会安装内置 Elasticsearch。对于生产环境,如果您想启用日志系统,强烈建议在该 YAML 文件中设置以下值,尤其是 `externalElasticsearchHost` 和 `externalElasticsearchPort`。在文件中提供以下信息后,KubeSphere 将直接对接您的外部 Elasticsearch,不再安装内置 Elasticsearch。 + {{}} + ```yaml es: # Storage backend for logging, tracing, events and auditing. elasticsearchMasterReplicas: 1 # The total number of master nodes. Even numbers are not allowed. @@ -133,7 +145,7 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 elasticsearchDataVolumeSize: 20Gi # The volume size of Elasticsearch data nodes. logMaxAge: 7 # Log retention day in built-in Elasticsearch. It is 7 days by default. elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. - externalElasticsearchUrl: # The URL of external Elasticsearch. + externalElasticsearchHost: # The Host of external Elasticsearch. 
externalElasticsearchPort: # The port of external Elasticsearch. ``` @@ -155,15 +167,13 @@ KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的 {{< tab "在仪表板中验证组件的安装" >}} -进入**服务组件**,检查 **Logging** 的状态,可以看到如下类似图片: - -![logging](/images/docs/zh-cn/enable-pluggable-components/kubesphere-logging-system/logging.png) +进入**系统组件**,检查**日志**标签页中的所有组件都处于**健康**状态。 {{}} {{< tab "通过 kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n kubesphere-logging-system diff --git a/content/zh/docs/pluggable-components/metrics-server.md b/content/zh/docs/pluggable-components/metrics-server.md index b9dae34b9..723a56083 100644 --- a/content/zh/docs/pluggable-components/metrics-server.md +++ b/content/zh/docs/pluggable-components/metrics-server.md @@ -6,7 +6,7 @@ linkTitle: "Metrics Server" weight: 6910 --- -KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/deployments/)的 Pod 弹性伸缩程序 (HPA)。在 KubeSphere 中,Metrics Server 控制着 HPA 是否启用。您可以根据不同类型的指标(例如 CPU 和内存使用率,以及最小和最大副本数),使用 HPA 对象对部署 (Deployment) 自动伸缩。通过这种方式,HPA 可以帮助确保您的应用程序在不同情况下都能平稳、一致地运行。 +KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/deployments/)的容器组(Pod)弹性伸缩程序 (HPA)。在 KubeSphere 中,Metrics Server 控制着 HPA 是否启用。您可以根据不同类型的指标(例如 CPU 和内存使用率,以及最小和最大副本数),使用 HPA 对象对部署 (Deployment) 自动伸缩。通过这种方式,HPA 可以帮助确保您的应用程序在不同情况下都能平稳、一致地运行。 ## 在安装前启用 Metrics Server @@ -39,9 +39,9 @@ KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/d ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 教程中演示了在 Kubernetes 上安装 KubeSphere 的流程。若想安装可选组件 Metrics Server,您可以先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中先启用该组件。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 教程中演示了在 Kubernetes 上安装 KubeSphere 的流程。若想安装可选组件 Metrics Server,您可以先在 
[cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中先启用该组件。 -1. 下载文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml),并打开文件进行编辑。 +1. 下载文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml),并打开文件进行编辑。 ```bash vi cluster-configuration.yaml @@ -57,7 +57,7 @@ KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/d 3. 执行以下命令以开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -70,17 +70,17 @@ KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/d 1. 以 `admin` 身份登录控制台。点击左上角**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看详情页。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看详情页。 {{< notice info >}} -自定义资源 (CRD) 能让用户创建新的资源类型,而无需添加其他 API 服务器。用户可以像其他原生 Kubernetes 对象一样使用这些资源。 +定制资源定义(CRD)能让用户创建新的资源类型,而无需添加其他 API 服务器。用户可以像其他原生 Kubernetes 对象一样使用这些资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,导航到 `metrics_server`,在 `enabled` 一行将 `false` 更改为 `true`。完成后,点击右下角的**更新**以保存配置。 +4. 
在该 YAML 文件中,导航到 `metrics_server`,在 `enabled` 一行将 `false` 更改为 `true`。完成后,点击右下角的**确定**以保存配置。 ```yaml metrics_server: @@ -100,13 +100,13 @@ KubeSphere 支持用于[部署](../../project-user-guide/application-workloads/d ## 验证组件的安装 -执行以下命令以验证 Metrics Server 的 Pod 在正常运行。 +执行以下命令以验证 Metrics Server 的容器组在正常运行。 ```bash kubectl get pod -n kube-system ``` -如果 Metrics Server 安装成功,那么集群可能会返回以下输出(不包括无关 Pod): +如果 Metrics Server 安装成功,那么集群可能会返回以下输出(不包括无关容器组): ```bash NAME READY STATUS RESTARTS AGE diff --git a/content/zh/docs/pluggable-components/network-policy.md b/content/zh/docs/pluggable-components/network-policy.md index 32b4f553f..38bab9755 100644 --- a/content/zh/docs/pluggable-components/network-policy.md +++ b/content/zh/docs/pluggable-components/network-policy.md @@ -6,7 +6,7 @@ linkTitle: "网络策略" weight: 6900 --- -从 3.0.0 版本开始,用户可以在 KubeSphere 中配置原生 Kubernetes 的网络策略。网络策略是一种以应用为中心的结构,使您能够指定如何允许 Pod 通过网络与各种网络实体进行通信。通过网络策略,用户可以在同一集群内实现网络隔离,这意味着可以在某些实例 (Pod) 之间设置防火墙。 +从 3.0.0 版本开始,用户可以在 KubeSphere 中配置原生 Kubernetes 的网络策略。网络策略是一种以应用为中心的结构,使您能够指定如何允许容器组通过网络与各种网络实体进行通信。通过网络策略,用户可以在同一集群内实现网络隔离,这意味着可以在某些实例(容器组)之间设置防火墙。 {{< notice note >}} @@ -49,9 +49,9 @@ weight: 6900 ### 在 Kubernetes 上安装 -在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用网络策略(可选组件)。 +在 Kubernetes 上安装 KubeSphere 的过程与教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用网络策略(可选组件)。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -68,7 +68,7 @@ weight: 6900 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -77,15 +77,15 @@ weight: 6900 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `network.networkpolicy`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 
在该 YAML 文件中,搜寻到 `network.networkpolicy`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml network: @@ -106,6 +106,4 @@ weight: 6900 ## 验证组件的安装 -如果您能在**网络管理**中看到**网络策略**,如下图所示,说明安装成功,因为安装组件之后才会显示这部分。 - -![网络策略](/images/docs/zh-cn/enable-pluggable-components/network-policies/network-policy.PNG) \ No newline at end of file +如果您能在**网络**中看到**网络策略**,说明安装成功,因为安装组件之后才会显示这部分。 \ No newline at end of file diff --git a/content/zh/docs/pluggable-components/overview.md b/content/zh/docs/pluggable-components/overview.md index 302b4829d..0e8732c15 100644 --- a/content/zh/docs/pluggable-components/overview.md +++ b/content/zh/docs/pluggable-components/overview.md @@ -26,7 +26,7 @@ CPU 和内存的资源请求和限制均指单个副本的要求。 | 命名空间 | openpitrix-system | | -------- | -------------------------------------------- | -| CPU 请求 | 0.3 core | +| CPU 请求 | 0.3 核 | | CPU 限制 | | | 内存请求 | 300 MiB | | 内存限制 | | @@ -38,7 +38,7 @@ CPU 和内存的资源请求和限制均指单个副本的要求。 | 命名空间 | kubesphere-devops-system | kubesphere-devops-system | | -------- | ------------------------------------------------------------ | -------------------------------- | | 安装模式 | All-in-One 安装 | 多节点安装 | -| CPU 请求 | 34 m | 0.47 core | +| CPU 请求 | 34 m | 0.47 核 | | CPU 限制 | | | | 内存请求 | 2.69 G | 8.6 G | | 内存限制 | | | @@ -55,7 +55,7 @@ CPU 和内存的资源请求和限制均指单个副本的要求。 | 内存请求 | 400 MiB | 30 MiB | 20 MiB | | 内存限制 | 8 GiB | | 1 GiB | | 安装 | 必需 | 必需 | 必需 | -| 备注 | Prometheus 的内存消耗取决于集群大小。8 GiB 可满足 200 个节点/16,000 个 Pod 的集群规模。 | | | +| 备注 | Prometheus 的内存消耗取决于集群大小。8 GiB 可满足 200 个节点/16,000 个容器组的集群规模。 | | | {{< notice note >}} diff --git a/content/zh/docs/pluggable-components/pod-ip-pools.md b/content/zh/docs/pluggable-components/pod-ip-pools.md index 779c5ca2d..37856097d 100644 --- a/content/zh/docs/pluggable-components/pod-ip-pools.md +++ b/content/zh/docs/pluggable-components/pod-ip-pools.md @@ -1,12 +1,12 @@ --- title: "容器组 IP 池" -keywords: "Kubernetes, KubeSphere, Pod, IP 池" -description: "了解如何启用容器组 IP 池,为您的 Pod 分配一个特定的容器组 IP 池。" +keywords: "Kubernetes, 
KubeSphere, 容器组, IP 池" +description: "了解如何启用容器组 IP 池,为您的容器组分配一个特定的容器组 IP 池。" linkTitle: "容器组 IP 池" weight: 6920 --- -容器组 IP 池用于规划 Pod 网络地址空间,每个容器组 IP 池之间的地址空间不能重叠。创建工作负载时,可选择特定的容器组 IP 池,这样创建出的 Pod 将从该容器组 IP 池中分配 IP。 +容器组 IP 池用于规划容器组网络地址空间,每个容器组 IP 池之间的地址空间不能重叠。创建工作负载时,可选择特定的容器组 IP 池,这样创建出的容器组将从该容器组 IP 池中分配 IP 地址。 ## 安装前启用容器组 IP 池 @@ -41,9 +41,9 @@ weight: 6920 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中首先启用容器组 IP 池。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中首先启用容器组 IP 池。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件并进行编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件并进行编辑。 ```bash vi cluster-configuration.yaml @@ -60,7 +60,7 @@ weight: 6920 3. 执行以下命令开始安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -70,15 +70,15 @@ weight: 6920 1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 -2. 点击**自定义资源 CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 +2. 点击 **CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,然后选择**编辑配置文件**。 +3. 
在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 -4. 在该配置文件中,搜寻到 `network`,将 `network.ippool.type` 更改为 `calico`。完成后,点击右下角的**更新**保存配置。 +4. 在该配置文件中,搜寻到 `network`,将 `network.ippool.type` 更改为 `calico`。完成后,点击右下角的**确定**保存配置。 ```yaml network: @@ -99,6 +99,4 @@ weight: 6920 ## 验证组件的安装 -在**集群管理**页面,您可以在**网络管理**下看到**容器组 IP 池**。 - -![pod-ip-pool](/images/docs/zh-cn/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png) +在**集群管理**页面,您可以在**网络**下看到**容器组 IP 池**。 diff --git a/content/zh/docs/pluggable-components/service-mesh.md b/content/zh/docs/pluggable-components/service-mesh.md index 41ca8840d..c4dedf767 100644 --- a/content/zh/docs/pluggable-components/service-mesh.md +++ b/content/zh/docs/pluggable-components/service-mesh.md @@ -6,7 +6,7 @@ linkTitle: "KubeSphere 服务网格" weight: 6800 --- -KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和流量管理可视化。它拥有强大的工具包,包括**熔断机制、蓝绿部署、金丝雀发布、流量镜像、分布式链路追踪、可观测性和流量控制**等。KubeSphere 服务网格支持代码无侵入的微服务治理,帮助开发者快速上手,Istio 的学习曲线也极大降低。KubeSphere 服务网格的所有功能都旨在满足用户的业务需求。 +KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和流量管理可视化。它拥有强大的工具包,包括**熔断机制、蓝绿部署、金丝雀发布、流量镜像、链路追踪、可观测性和流量控制**等。KubeSphere 服务网格支持代码无侵入的微服务治理,帮助开发者快速上手,Istio 的学习曲线也极大降低。KubeSphere 服务网格的所有功能都旨在满足用户的业务需求。 有关更多信息,请参见[灰度发布](../../project-user-guide/grayscale-release/overview/)。 @@ -41,9 +41,9 @@ KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和 ### 在 Kubernetes 上安装 -在 Kubernetes 上安装 KubeSphere 的过程和教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中启用服务网格(可选组件)。 +在 Kubernetes 上安装 KubeSphere 的过程和教程[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 中的说明大致相同,不同之处是需要先在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中启用服务网格(可选组件)。 -1. 
下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件,然后打开并开始编辑。 ```bash vi cluster-configuration.yaml @@ -59,7 +59,7 @@ KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和 3. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -68,15 +68,15 @@ KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。 -2. 点击**自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 +2. 点击 **CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,选择**编辑 YAML**。 -4. 在该 YAML 文件中,搜寻到 `servicemesh`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。 +4. 
在该 YAML 文件中,搜寻到 `servicemesh`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**确定**,保存配置。 ```yaml servicemesh: @@ -100,15 +100,13 @@ KubeSphere 服务网格基于 [Istio](https://istio.io/),将微服务治理和 {{< tab "在仪表板中验证组件的安装" >}} -进入**服务组件**,查看 **Istio** 的状态。您可能会看到如下图所示的界面: - -![istio](/images/docs/zh-cn/enable-pluggable-components/kubesphere-service-mesh/istio.png) +进入**系统组件**,检查 **Istio** 标签页中的所有组件都处于**健康**状态。 {{}} {{< tab "通过 kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n istio-system diff --git a/content/zh/docs/pluggable-components/service-topology.md b/content/zh/docs/pluggable-components/service-topology.md index af26c1a8f..0aaf6c976 100644 --- a/content/zh/docs/pluggable-components/service-topology.md +++ b/content/zh/docs/pluggable-components/service-topology.md @@ -1,7 +1,7 @@ --- title: "服务拓扑图" keywords: "Kubernetes, KubeSphere, 服务, 拓扑图" -description: "了解如何启用服务拓扑图,以基于 Weave Scope 查看 Pod 的上下文详情。" +description: "了解如何启用服务拓扑图,以基于 Weave Scope 查看容器组的上下文详情。" linkTitle: "服务拓扑图" weight: 6915 --- @@ -41,9 +41,9 @@ weight: 6915 ### 在 Kubernetes 上安装 -[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件中首先启用服务拓扑图。 +[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 时,您可以在 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件中首先启用服务拓扑图。 -1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml) 文件并进行编辑。 +1. 下载 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml) 文件并进行编辑。 ```bash vi cluster-configuration.yaml @@ -60,7 +60,7 @@ weight: 6915 3. 
执行以下命令开始安装。 ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` @@ -70,15 +70,15 @@ weight: 6915 1. 使用 `admin` 用户登录控制台。点击左上角的**平台管理**,然后选择**集群管理**。 -2. 点击**自定义资源 CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 +2. 点击 **CRD**,然后在搜索栏中输入 `clusterconfiguration`。点击搜索结果查看其详情页。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不新增 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的 ,然后选择**编辑配置文件**。 +3. 在**自定义资源**中,点击 `ks-installer` 右侧的 ,然后选择**编辑 YAML**。 -4. 在该配置文件中,搜寻到 `network`,将 `network.topology.type` 更改为 `weave-scope`。完成后,点击右下角的**更新**保存配置。 +4. 在该配置文件中,搜寻到 `network`,将 `network.topology.type` 更改为 `weave-scope`。完成后,点击右下角的**确定**保存配置。 ```yaml network: @@ -103,15 +103,13 @@ weight: 6915 {{< tab "在仪表板中验证组件的安装" >}} -进入一个项目中,导航到**应用负载**下的**服务**,即可看到**拓扑图**选项卡下**服务**的拓扑图。 - -![topology1](/images/docs/zh-cn/enable-pluggable-components/service-topology/topology.png) +进入一个项目中,导航到**应用负载**下的**服务**,即可看到**服务拓扑**页签下**服务**的拓扑图。 {{}} {{< tab "通过 Kubectl 验证组件的安装" >}} -执行以下命令来检查 Pod 的状态: +执行以下命令来检查容器组的状态: ```bash kubectl get pod -n weave diff --git a/content/zh/docs/pluggable-components/uninstall-pluggable-components.md b/content/zh/docs/pluggable-components/uninstall-pluggable-components.md index caa23e46e..8a6497c71 100644 --- a/content/zh/docs/pluggable-components/uninstall-pluggable-components.md +++ b/content/zh/docs/pluggable-components/uninstall-pluggable-components.md @@ -1,112 +1,88 @@ --- -title: "Uninstall Pluggable Components from KubeSphere v3.1.x" +title: "KubeSphere 3.2.x 卸载可插拔组件" keywords: "Installer, uninstall, KubeSphere, Kubernetes" -description: "Learn how to uninstall each pluggable component in KubeSphere v3.1.x." 
-linkTitle: "Uninstall Pluggable Components from KubeSphere v3.1.x" +description: "学习如何在 KubeSphere 3.2.x 卸载所有可插拔组件。" +linkTitle: "KubeSphere 3.2.x 卸载可插拔组件" Weight: 6940 --- -After you [enable the pluggable components of KubeSphere](../../pluggable-components/), you can also uninstall them by performing the following steps. Please back up any necessary data before you uninstall these components. +[启用 KubeSphere 可插拔组件之后](../../pluggable-components/),还可以根据以下步骤卸载它们。请在卸载这些组件之前,备份所有重要数据。 {{< notice note >}} -The methods of uninstalling certain pluggable components on KubeSphere v3.1.x are different from the methods on KubeSphere v3.0.0. For more information about the uninstallation methods on KubeSphere v3.0.0, see [Uninstall Pluggable Components from KubeSphere](https://v3-0.docs.kubesphere.io/docs/faq/installation/uninstall-pluggable-components/). +KubeSphere 3.2.x 卸载某些可插拔组件的方法与 KubeSphere v3.0.0 不相同。有关 KubeSphere v3.0.0 卸载可插拔组件的详细方法,请参见[从 KubeSphere 上卸载可插拔组件](https://v3-0.docs.kubesphere.io/zh/docs/faq/installation/uninstall-pluggable-components/)。 + {{}} -## Prerequisites +## 准备工作 -You have to change the value of the field `enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration` before you uninstall any pluggable components except Service Topology and Pod IP Pools. +在卸载除服务拓扑图和容器组 IP 池之外的可插拔组件之前,必须将 CRD 配置文件 `ClusterConfiguration` 中的 `ks-installer` 中的 `enabled` 字段的值从 `true` 改为 `false`。 -Use either of the following methods to change the value of the field `enabled`: +使用下列其中一种方法更改 `enabled` 字段的值: -- Run the following command to edit `ks-installer`: - ```bash - kubectl -n kubesphere-system edit clusterconfiguration ks-installer - ``` +- 运行以下命令编辑 `ks-installer`: +```bash +kubectl -n kubesphere-system edit clusterconfiguration ks-installer +``` -- Log in to the KubeSphere web console as `admin`, click **Platform** in the upper-left corner and select **Cluster Management**, and then go to **CRDs** to search for `ClusterConfiguration`. 
For more information, see [Enable Pluggable Components](../../../pluggable-components/). +- 使用 `admin` 身份登录 KubeSphere Web 控制台,左上角点击**平台管理**,选择**集群管理**,在**自定义资源 CRD** 中搜索 `ClusterConfiguration`。有关更多信息,请参见[启用可插拔组件](../../pluggable-components/)。 {{< notice note >}} -After the value is changed, you need to wait until the updating process is complete before you continue with any further operations. +更改值之后,需要等待配置更新完成,然后继续进行后续操作。 {{}} -## Uninstall KubeSphere App Store +## 卸载 KubeSphere 应用商店 -Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `openpitrix.store.enabled` 字段的值从 `true` 改为 `false`。 -## Uninstall KubeSphere DevOps +## 卸载 KubeSphere DevOps -1. Change the value of `devops.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. - -2. Run the command mentioned in [Prerequisites](#prerequisites) and then delete the code under `status.devops` in `ks-installer` of the CRD `ClusterConfiguration`. - -3. Run the following commands: +1. 卸载 DevOps: ```bash - helm -n kubesphere-devops-system delete ks-jenkins - helm -n kubesphere-devops-system delete uc + helm uninstall -n kubesphere-devops-system devops + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "remove", "path": "/status/devops"}]' + kubectl patch -n kubesphere-system cc ks-installer --type=json -p='[{"op": "replace", "path": "/spec/devops/enabled", "value": false}]' + ``` +2. 
删除 DevOps 资源: + + ```bash + # 删除所有 DevOps 相关资源 + for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do + for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do + for devops_res in $(kubectl get $devops_crd -n $ns -oname); do + kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge + done + done + done + # 删除所有 DevOps CRD + kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io" | xargs -I crd_name kubectl delete crd crd_name + # 删除 DevOps 命名空间 + kubectl delete namespace kubesphere-devops-system ``` - ```bash - # Delete DevOps projects - for devopsproject in `kubectl get devopsprojects -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch devopsprojects $devopsproject -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for pip in `kubectl get pipeline -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch pipeline $pip -n `kubectl get pipeline -A | grep $pip | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibinaries in `kubectl get s2ibinaries -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibinaries $s2ibinaries -n `kubectl get s2ibinaries -A | grep $s2ibinaries | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibuilders in `kubectl get s2ibuilders -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibuilders $s2ibuilders -n `kubectl get s2ibuilders -A | grep $s2ibuilders | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for s2ibuildertemplates in `kubectl get s2ibuildertemplates -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2ibuildertemplates $s2ibuildertemplates -n `kubectl get s2ibuildertemplates -A | grep $s2ibuildertemplates | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - for 
s2iruns in `kubectl get s2iruns -A -o jsonpath="{.items[*].metadata.name}"` - do - kubectl patch s2iruns $s2iruns -n `kubectl get s2iruns -A | grep $s2iruns | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge - done - - kubectl delete devopsprojects --all 2>/dev/null - ``` + +## 卸载 KubeSphere 日志系统 + +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `logging.enabled` 字段的值从 `true` 改为 `false`。 + +2. 仅禁用日志收集: ```bash - kubectl delete ns kubesphere-devops-system - ``` - -## Uninstall KubeSphere Logging - -1. Change the value of `logging.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. - -2. To disable only log collection: - - ```bash - delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail + kubectl delete inputs.logging.kubesphere.io -n kubesphere-logging-system tail ``` {{< notice note >}} - After running this command, you can still view the container recent logs provided by Kubernetes by default. However, the container history logs will be cleared and you cannot browse them any more. + 运行此命令后,默认情况下仍可查看 Kubernetes 提供的容器最近日志。但是,容器历史记录日志将被清除,您无法再浏览它们。 {{}} -3. To uninstall Logging system including Elasticsearch: +3. 卸载包括 Elasticsearch 的日志系统,请执行以下操作: ```bash kubectl delete crd fluentbitconfigs.logging.kubesphere.io @@ -118,27 +94,34 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins helm uninstall elasticsearch-logging --namespace kubesphere-logging-system ``` - {{< notice note >}} + {{< notice warning >}} - This operation may cause anomalies in Auditing, Events, and Service Mesh. + 此操作可能导致审计、事件和服务网格的异常。 {{}} + +3. 运行以下命令: -## Uninstall KubeSphere Events + ```bash + kubectl delete deployment logsidecar-injector-deploy -n kubesphere-logging-system + kubectl delete ns kubesphere-logging-system + ``` -1. Change the value of `events.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +## 卸载 KubeSphere 事件系统 -2. 
Run the following command: +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `events.enabled` 字段的值从 `true` 改为 `false`。 + +2. 运行以下命令: ```bash helm delete ks-events -n kubesphere-logging-system ``` -## Uninstall KubeSphere Alerting +## 卸载 KubeSphere 告警系统 -1. Change the value of `alerting.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `alerting.enabled` 字段的值从 `true` 改为 `false`。 -2. Run the following command: +2. 运行以下命令: ```bash kubectl -n kubesphere-monitoring-system delete thanosruler kubesphere @@ -146,28 +129,28 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins {{< notice note >}} - Notification is installed in KubeSphere v3.1.x by default, so you do not need to uninstall it. + KubeSphere 3.2.1 通知系统为默认安装,您无需卸载。 {{}} -## Uninstall KubeSphere Auditing +## 卸载 KubeSphere 审计 -1. Change the value of `auditing.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `auditing.enabled` 字段的值从 `true` 改为 `false`。 -2. Run the following commands: +2. 运行以下命令: ```bash helm uninstall kube-auditing -n kubesphere-logging-system - kubectl delete crd awh - kubectl delete crd ar + kubectl delete crd rules.auditing.kubesphere.io + kubectl delete crd webhooks.auditing.kubesphere.io ``` -## Uninstall KubeSphere Service Mesh +## 卸载 KubeSphere 服务网格 -1. Change the value of `servicemesh.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中 `ks-installer` 参数的 `servicemesh.enabled` 字段的值从 `true` 改为 `false`。 -2. Run the following commands: +2. 
运行以下命令: ```bash curl -L https://istio.io/downloadIstio | sh - @@ -180,15 +163,15 @@ Change the value of `openpitrix.store.enabled` from `true` to `false` in `ks-ins helm -n istio-system delete jaeger-operator ``` -## Uninstall Network Policies +## 卸载网络策略 -For the component NetworkPolicy, disabling it does not require uninstalling the component as its controller is now inside `ks-controller-manager`. If you want to remove it from the KubeSphere console, change the value of `network.networkpolicy.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +对于 NetworkPolicy 组件,禁用它不需要卸载组件,因为其控制器位于 `ks-controller-manager` 中。如果想要将其从 KubeSphere 控制台中移除,将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.networkpolicy.enabled` 的值从 `true` 改为 `false`。 -## Uninstall Metrics Server +## 卸载 Metrics Server -1. Change the value of `metrics_server.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `metrics_server.enabled` 的值从 `true` 改为 `false`。 -2. Run the following commands: +2. 运行以下命令: ```bash kubectl delete apiservice v1beta1.metrics.k8s.io @@ -196,34 +179,34 @@ For the component NetworkPolicy, disabling it does not require uninstalling the kubectl -n kube-system delete deployment metrics-server ``` -## Uninstall Service Topology +## 卸载服务拓扑图 -1. Change the value of `network.topology.type` from `weave-scope` to `none` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.topology.type` 的值从 `weave-scope` 改为 `none`。 -2. Run the following command: +2. 运行以下命令: ```bash kubectl delete ns weave ``` -## Uninstall Pod IP Pools +## 卸载容器组 IP 池 -Change the value of `network.ippool.type` from `calico` to `none` in `ks-installer` of the CRD `ClusterConfiguration`. 
+将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `network.ippool.type` 的值从 `calico` 改为 `none`。 -## Uninstall KubeEdge +## 卸载 KubeEdge -1. Change the value of `kubeedge.enabled` from `true` to `false` in `ks-installer` of the CRD `ClusterConfiguration`. +1. 将 CRD `ClusterConfiguration` 配置文件中参数 `ks-installer` 中 `kubeedge.enabled` 的值从 `true` 改为 `false`。 -2. Run the following commands: +2. 运行以下命令: ```bash helm uninstall kubeedge -n kubeedge kubectl delete ns kubeedge ``` - + {{< notice note >}} - - After the uninstallation, you will not be able to add edge nodes to your cluster. - + + 卸载后,您将无法为集群添加边缘节点。 + {{}} diff --git a/content/zh/docs/project-administration/container-limit-ranges.md b/content/zh/docs/project-administration/container-limit-ranges.md index 3799ce809..00341fae6 100644 --- a/content/zh/docs/project-administration/container-limit-ranges.md +++ b/content/zh/docs/project-administration/container-limit-ranges.md @@ -14,16 +14,14 @@ weight: 13400 ## 准备工作 -您需要有一个可用的企业空间、一个项目和一个帐户 (`project-admin`)。该帐户必须在项目层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +您需要有一个可用的企业空间、一个项目和一个用户 (`project-admin`)。该用户必须在项目层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 ## 设置默认限制范围 -1. 以 `project-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您在**概览**页面上会看到默认限制范围尚未设置。点击**容器资源默认请求未设置**旁的**设置**来配置限制范围。 +1. 以 `project-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您在**概览**页面上会看到默认配额尚未设置。点击**默认容器配额未设置**旁的**编辑配额**来配置限制范围。 2. 在弹出的对话框中,您可以看到 KubeSphere 默认不设置任何请求或限制。要设置请求和限制来控制 CPU 和内存资源,请移动滑块至期望的值或者直接输入数值。字段留空意味着不设置任何请求或限制。 - ![默认限制范围](/images/docs/zh-cn/project-administration/container-limit-ranges/default-limit-range.PNG) - {{< notice note >}} 限制必须大于请求。 @@ -32,18 +30,14 @@ weight: 13400 3. 点击**确定**完成限制范围设置。 -4. 在**项目设置**下的**基本信息**页面,您可以查看项目中容器的默认限制范围。 +4. 在**项目设置**下的**基本信息**页面,您可以查看项目中容器的默认容器配额。 - ![查看限制范围](/images/docs/zh-cn/project-administration/container-limit-ranges/view-limit-ranges.PNG) +5. 
要更改默认容器配额,请在**基本信息**页面点击**管理**,然后选择**编辑默认容器配额**。 -5. 要更改默认限制范围,请在**基本信息**页面点击**项目管理**,然后选择**编辑资源默认请求**。 - -6. 在弹出的对话框中直接更改限制范围,然后点击**确定**。 +6. 在弹出的对话框中直接更改容器配额,然后点击**确定**。 7. 当您创建工作负载时,容器的请求和限制将预先填充对应的值。 - ![工作负载默认值](/images/docs/zh-cn/project-administration/container-limit-ranges/workload-values.PNG) - {{< notice note >}} 有关更多信息,请参见[容器镜像设置](../../project-user-guide/application-workloads/container-image-settings/)中的**资源请求**。 diff --git a/content/zh/docs/project-administration/disk-log-collection.md b/content/zh/docs/project-administration/disk-log-collection.md index 3e27276ac..4636c1af5 100644 --- a/content/zh/docs/project-administration/disk-log-collection.md +++ b/content/zh/docs/project-administration/disk-log-collection.md @@ -1,26 +1,26 @@ --- -title: "落盘日志收集" -keywords: 'KubeSphere, Kubernetes, 项目, 落盘, 日志, 收集' -description: '启用落盘日志收集,对日志进行统一收集、管理和分析。' -linkTitle: "落盘日志收集" +title: "日志收集" +keywords: 'KubeSphere, Kubernetes, 项目, 日志, 收集' +description: '启用日志收集,对日志进行统一收集、管理和分析。' +linkTitle: "日志收集" weight: 13600 --- KubeSphere 支持多种日志收集方式,使运维团队能够以灵活统一的方式收集、管理和分析日志。 -本教程演示了如何为示例应用收集落盘日志。 +本教程演示了如何为示例应用收集日志。 ## 准备工作 -- 您需要创建企业空间、项目和帐户 (`project-admin`)。该帐户必须被邀请到项目中,并在项目级别具有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +- 您需要创建企业空间、项目和用户 (`project-admin`)。该用户必须被邀请到项目中,并在项目级别具有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 - 您需要启用 [KubeSphere 日志系统](../../pluggable-components/logging/)。 -## 启用落盘日志收集 +## 启用日志收集 1. 以 `project-admin` 身份登录 KubeSphere 的 Web 控制台,进入项目。 -2. 在左侧导航栏中,选择**项目设置**中的**高级设置**。在**落盘日志收集**一栏下,点击 以启用该功能。 +2. 在左侧导航栏中,选择**项目设置**中的**日志收集**,点击 以启用该功能。 ## 创建部署 @@ -29,12 +29,10 @@ KubeSphere 支持多种日志收集方式,使运维团队能够以灵活统一 2. 在出现的对话框中,设置部署的名称(例如 `demo-deployment`),再点击**下一步**。 -3. 在**容器镜像**下,点击**添加容器镜像**。 +3. 在**容器组设置**下,点击**添加容器**。 4. 在搜索栏中输入 `alpine`,以该镜像(标签:`latest`)作为示例。 - ![alpine-image](/images/docs/zh-cn/project-administration/disk-log-collection/alpine-image.png) - 5. 
向下滚动并勾选**启动命令**。在**运行命令**和**参数**中分别输入以下值,点击 **√**,然后点击**下一步**。 **运行命令** @@ -55,13 +53,9 @@ KubeSphere 支持多种日志收集方式,使运维团队能够以灵活统一 {{}} - ![run-command](/images/docs/zh-cn/project-administration/disk-log-collection/run-command.png) +6. 在**存储卷设置**选项卡下,切换 启用**收集存储卷上的日志**,点击**挂载存储卷**。 -6. 在**挂载存储**选项卡下,切换 启用**落盘日志收集**,点击**添加存储卷**。 - -7. 在**临时存储卷**选项卡下,输入存储卷名称(例如 `demo-disk-log-collection`),并设置访问模式和路径。请参考以下示例。 - - ![volume-example](/images/docs/zh-cn/project-administration/disk-log-collection/volume-example.png) +7. 在**临时存储卷**选项卡下,输入存储卷名称(例如 `demo-disk-log-collection`),并设置访问模式和路径。 点击 **√**,然后点击**下一步**继续。 @@ -77,9 +71,8 @@ KubeSphere 支持多种日志收集方式,使运维团队能够以灵活统一 1. 在**部署**选项卡下,点击刚才创建的部署以访问其详情页。 -2. 在**资源状态**中,点击 查看容器详情,然后点击 `logsidecar-container`(filebeat 容器)日志图标 以检查落盘日志。 +2. 在**资源状态**中,点击 查看容器详情,然后点击 `logsidecar-container`(filebeat 容器)日志图标 以检查日志。 -3. 或者,您也可以使用右下角**工具箱**中的**日志查询**功能来查看标准输出日志。例如,使用该部署的 Pod 名称进行模糊匹配: +3. 或者,您也可以使用右下角**工具箱**中的**日志查询**功能来查看标准输出日志。例如,使用该部署的 Pod 名称进行模糊匹配。 - ![fuzzy-match](/images/docs/zh-cn/project-administration/disk-log-collection/fuzzy-match.png) diff --git a/content/zh/docs/project-administration/project-and-multicluster-project.md b/content/zh/docs/project-administration/project-and-multicluster-project.md index ac1c9b98e..f4a96359d 100644 --- a/content/zh/docs/project-administration/project-and-multicluster-project.md +++ b/content/zh/docs/project-administration/project-and-multicluster-project.md @@ -15,14 +15,14 @@ KubeSphere 中的项目即 Kubernetes [命名空间](https://kubernetes.io/zh/do ## 准备工作 -- 您需要有一个可用的企业空间和一个帐户 (`project-admin`)。该帐户必须在该企业空间拥有 `workspace-self-provisioner` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +- 您需要有一个可用的企业空间和一个用户 (`project-admin`)。该用户必须在该企业空间拥有 `workspace-self-provisioner` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 - 
在创建多集群项目前,您需要通过[直接连接](../../multicluster-management/enable-multicluster/direct-connection/)或[代理连接](../../multicluster-management/enable-multicluster/agent-connection/)启用多集群功能。 ## 项目 ### 创建项目 -1. 前往企业空间的**项目管理**页面,点击**项目**选项卡下的**创建**。 +1. 前往企业空间的**项目**页面,点击**项目**选项卡下的**创建**。 {{< notice note >}} @@ -36,23 +36,19 @@ KubeSphere 中的项目即 Kubernetes [命名空间](https://kubernetes.io/zh/do 3. 创建的项目会显示在下图所示的列表中。您可以点击项目名称打开**概览**页面。 - ![project-list](/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-list.png) ### 编辑项目 -1. 前往您的项目,选择**项目设置**下的**基本信息**,在页面右侧点击**项目管理**。 +1. 前往您的项目,选择**项目设置**下的**基本信息**,在页面右侧点击**管理**。 2. 从下拉菜单中选择**编辑信息**。 - ![project-basic-information](/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-basic-information.png) - {{< notice note >}} - 项目名称无法编辑。如需修改其他信息,请参考相应的文档教程。 {{}} -3. 若要删除项目,选择该下拉菜单中的**删除项目**,在弹出的对话框中输入项目名称,点击**确定**。 +3. 若要删除项目,选择该下拉菜单中的**删除**,在弹出的对话框中输入项目名称,点击**确定**。 {{< notice warning >}} @@ -64,7 +60,7 @@ KubeSphere 中的项目即 Kubernetes [命名空间](https://kubernetes.io/zh/do ### 创建多集群项目 -1. 前往企业空间的**项目管理**页面,点击**多集群项目**选项卡,再点击**创建**。 +1. 前往企业空间的**项目**页面,点击**多集群项目**选项卡,再点击**创建**。 {{< notice note >}} @@ -74,19 +70,18 @@ KubeSphere 中的项目即 Kubernetes [命名空间](https://kubernetes.io/zh/do {{}} 2. 在弹出的**创建多集群项目**窗口中输入项目名称,并根据需要添加别名或说明。在**集群设置**下,点击**添加集群**为项目选择多个集群,然后点击**确定**。 +3. 创建的多集群项目会显示在列表中。点击多集群项目右侧的 ,从下拉菜单中选择一个操作: -3. 创建的多集群项目会显示在下图所示的列表中。您可以点击项目名称打开**概览**页面。 - - ![multi-cluster-list](/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-list.png) + - **编辑信息**:编辑多集群项目的基本信息。 + - **添加集群**:在弹出对话框的下拉列表中选择一个集群并点击**确定**,为多集群项目添加一个集群。 + - **删除**:删除多集群项目。 ### 编辑多集群项目 -1. 前往您的多集群项目,选择**项目设置**下的**基本信息**,在页面右侧点击**项目管理**。 +1. 前往您的多集群项目,选择**项目设置**下的**基本信息**,在页面右侧点击**管理**。 2. 
从下拉菜单中选择**编辑信息**。 - ![multi-cluster-basic-information](/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png) - {{< notice note >}} 项目名称无法编辑。如需修改其他信息,请参考相应的文档教程。 diff --git a/content/zh/docs/project-administration/project-gateway.md b/content/zh/docs/project-administration/project-gateway.md index a75ded47e..94657dbc7 100644 --- a/content/zh/docs/project-administration/project-gateway.md +++ b/content/zh/docs/project-administration/project-gateway.md @@ -8,29 +8,27 @@ weight: 13500 KubeSphere 项目中的网关是一个[ NGINX Ingress 控制器](https://www.nginx.com/products/nginx-ingress-controller/)。KubeSphere 内置的用于 HTTP 负载均衡的机制称为[应用路由](../../project-user-guide/application-workloads/routes/),它定义了从外部到集群服务的连接规则。如需允许从外部访问服务,用户可创建路由资源来定义 URI 路径、后端服务名称等信息。 -在 KubeSphere 3.0,项目网关单独运行,即每个项目都有自己的 Ingress 控制器。在下一个发布版本中,KubeSphere 除了提供项目范围的网关外,还将提供集群范围的网关,使得所有项目都能共享相同的网关。 +KubeSphere 除了提供项目范围的网关外,还提供[集群范围的网关](../../cluster-administration/cluster-settings/cluster-gateway/),使得所有项目都能共享全局网关。 -本教程演示如何在 KubeSphere 中设置网关以从外部访问服务和路由。 +本教程演示如何在 KubeSphere 中开启项目网关以从外部访问服务和路由。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 (`project-admin`)。该帐户必须被邀请至项目,并且在项目中的角色为 `admin`。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../docs/quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 (`project-admin`)。该用户必须被邀请至项目,并且在项目中的角色为 `admin`。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../docs/quick-start/create-workspace-and-project/)。 -## 设置网关 +## 开启网关 -1. 以 `project-admin` 用户登录 KubeSphere Web 控制台,进入您的项目,从左侧导航栏进入**项目设置**下的**高级设置**页面,然后点击**设置网关**。 - - ![set-project-gateway](/images/docs/zh-cn/project-administration/project-gateway/set-project-gateway.jpg) +1. 以 `project-admin` 用户登录 KubeSphere Web 控制台,进入您的项目,从左侧导航栏进入**项目设置**下的**网关设置**页面,然后点击**开启网关**。 2. 在弹出的对话框中选择网关的访问方式。 - ![access-method](/images/docs/zh-cn/project-administration/project-gateway/access-method.png) - **NodePort**:通过网关访问服务对应的节点端口。 - + **LoadBalancer**:通过网关访问服务的单独 IP 地址。 -3. 
在**设置网关**对话框,您可以启用**应用治理**以使用 Tracing 功能和[不同的灰度发布策略](../../project-user-guide/grayscale-release/overview/)。如果启用**应用治理**后无法访问路由,请在路由 (Ingress) 中添加注解(例如 `nginx.ingress.kubernetes.io/service-upstream: true`)。 +3. 在**开启网关**对话框,您可以启用**链路追踪**。创建自制应用时,您必须开启**链路追踪**,以使用链路追踪功能和[不同的灰度发布策略](../../project-user-guide/grayscale-release/overview/)。如果启用**链路追踪**后无法访问路由,请在路由 (Ingress) 中添加注解(例如 `nginx.ingress.kubernetes.io/service-upstream: true`)。 + +3. 在**配置选项**中,添加键值对,为 NGINX Ingress 控制器的系统组件提供配置信息。有关更多信息,请参阅 [NGINX Ingress 控制器官方文档](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#configuration-options)。 4. 选择访问方式后点击**保存**。 @@ -38,8 +36,6 @@ KubeSphere 项目中的网关是一个[ NGINX Ingress 控制器](https://www.ngi 如果您选择 **NodePort**,KubeSphere 将为 HTTP 请求和 HTTPS 请求分别设置一个端口。您可以用 `EIP:NodePort` 或 `Hostname:NodePort` 地址访问服务。 -![nodeport](/images/docs/zh-cn/project-administration/project-gateway/nodeport.jpg) - 例如,如果您的服务配置了的弹性 IP 地址 (EIP),请访问: - `http://EIP:32734` @@ -62,11 +58,7 @@ KubeSphere 项目中的网关是一个[ NGINX Ingress 控制器](https://www.ngi ## LoadBalancer 在选择 **LoadBalancer** 前,您必须先配置负载均衡器。负载均衡器的 IP 地址将与网关绑定以便内部的服务和路由可以访问。 - -![lb](/images/docs/zh-cn/project-administration/project-gateway/lb.png) - {{< notice note >}} - -云厂商通常支持负载均衡器插件。如果在主流的 Kubernetes Engine 上安装 KubeSphere,您可能会发现环境中已有可用的负载均衡器。如果在裸金属环境中安装 KubeSphere,您可以使用 [PorterLB](https://github.com/kubesphere/porter) 作为负载均衡器。 +云厂商通常支持负载均衡器插件。如果在主流的 Kubernetes Engine 上安装 KubeSphere,您可能会发现环境中已有可用的负载均衡器。如果在裸金属环境中安装 KubeSphere,您可以使用 [OpenELB](https://github.com/kubesphere/openelb) 作为负载均衡器。 {{}} \ No newline at end of file diff --git a/content/zh/docs/project-administration/project-network-isolation.md b/content/zh/docs/project-administration/project-network-isolation.md index 1e2c6ea6c..bfb07de28 100644 --- a/content/zh/docs/project-administration/project-network-isolation.md +++ b/content/zh/docs/project-administration/project-network-isolation.md @@ -11,7 +11,7 @@ weight: 13300 ## 准备工作 - 
已经启用[网络策略](../../pluggable-components/network-policy/)。 -- 您必须有一个可用的项目和一个在项目层级拥有 `admin` 角色的帐户 (`project-admin`)。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +- 您必须有一个可用的项目和一个在项目层级拥有 `admin` 角色的用户 (`project-admin`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 {{< notice note >}} @@ -23,8 +23,6 @@ weight: 13300 1. 以 `project-admin` 身份登录 KubeSphere 控制台,进入您的项目,在**项目设置**下选择**网络隔离**。项目网络隔离默认关闭。 - ![项目网络隔离](/images/docs/zh-cn/project-administration/project-network-isolation/project-network-isolation.PNG) - 2. 要启用项目网络隔离,请点击**开启**。 {{< notice note >}} @@ -35,8 +33,6 @@ weight: 13300 3. 您也可以在这个页面关闭网络隔离。 - ![关闭隔离](/images/docs/zh-cn/project-administration/project-network-isolation/isolation-off.PNG) - {{< notice note >}} 关闭网络隔离时,先前创建的所有网络策略都将被删除。 @@ -61,21 +57,18 @@ weight: 13300 #### 放行来自不同项目的工作负载的入站流量 -1. 在当前项目的**网络隔离**页面,选择**集群内部白名单**选项卡。 +1. 在当前项目的**网络隔离**页面,选择**内部白名单**选项卡。 -2. 点击**添加白名单**。 +2. 点击**添加白名单条目**。 -3. 在**方向**下选择**入口**。 +3. 在**流量方向**下选择**入站**。 4. 在**类型**下选择**项目**选项卡。 5. 选择 `demo-project-2` 项目。 - ![入站规则](/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule.PNG) - 6. 点击**确定**,然后您可以在白名单中看到该项目。 - ![入站规则已添加](/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule-added.PNG) {{< notice note >}} @@ -85,11 +78,11 @@ weight: 13300 #### 放行前往不同项目的服务的出站流量 -1. 在当前项目的**网络隔离**页面,选择**集群内部白名单**选项卡。 +1. 在当前项目的**网络隔离**页面,选择**内部白名单**选项卡。 -2. 点击**添加白名单**。 +2. 点击**添加白名单条目**。 -3. 在**方向**下选择**出口**。 +3. 在**流量方向**下选择**出站**。 4. 在**类型**下选择**服务**选项卡。 @@ -97,11 +90,8 @@ weight: 13300 6. 选择允许接收出站流量的服务。在本例中,请选择 `nginx`。 - ![出站规则](/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule.PNG) - 7. 点击**确定**,然后您可以在白名单中看到该服务。 - ![出站规则已添加](/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule-added.PNG) {{< notice note >}} @@ -115,19 +105,16 @@ KubeSphere 使用 CIDR 来区分对等方。假设当前项目中已创建一个 #### 放行来自集群外部客户端的入站流量 -1. 
在当前项目的**网络隔离**页面,选择**集群外部 IP 地址**选项卡,然后点击**添加规则**。 +1. 在当前项目的**网络隔离**页面,选择**外部白名单**选项卡,然后点击**添加白名单条目**。 -2. 在**方向**下选择**入口**。 +2. 在**流量方向**下选择**入站**。 -3. 在 **CIDR** 中输入 `192.168.1.1/32`。 +3. 在 **网段** 中输入 `192.168.1.1/32`。 4. 选择 `TCP` 协议并输入 `80` 作为端口号。 - ![入站-CIDR](/images/docs/zh-cn/project-administration/project-network-isolation/ingress-CIDR.PNG) - 5. 点击**确定**,然后您可以看到该规则已经添加。 - ![入站-CIDR-已设置](/images/docs/zh-cn/project-administration/project-network-isolation/ingress-cidr-set.PNG) {{< notice note >}} @@ -139,19 +126,16 @@ KubeSphere 使用 CIDR 来区分对等方。假设当前项目中已创建一个 #### 放行前往集群外部服务的出站流量 -1. 在当前项目的**网络隔离**页面,选择**集群外部 IP 地址**选项卡,然后点击**添加规则**。 +1. 在当前项目的**网络隔离**页面,选择**外部白名单**选项卡,然后点击**添加白名单条目**。 -2. 在**方向**下选择**出口**。 +2. 在**流量方向**下选择**出站**。 -3. 在 **CIDR** 中输入 `10.1.0.1/32`。 +3. 在 **网段** 中输入 `10.1.0.1/32`。 4. 选择 `TCP` 协议并输入 `80` 作为端口号。 - ![出站-CIDR](/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR.PNG) - 5. 点击**确定**,然后您可以看到该规则已经添加。 - ![出站-CIDR-已添加](/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR-added.PNG) {{< notice note >}} diff --git a/content/zh/docs/project-administration/role-and-member-management.md b/content/zh/docs/project-administration/role-and-member-management.md index 2765a4aae..6e84e03bc 100644 --- a/content/zh/docs/project-administration/role-and-member-management.md +++ b/content/zh/docs/project-administration/role-and-member-management.md @@ -17,7 +17,7 @@ weight: 13200 ## 准备工作 -您需要至少创建一个项目(例如 `demo-project`)。此外,您还需要准备一个在项目级别具有 `admin` 角色的帐户(例如 `project-admin`)。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +您需要至少创建一个项目(例如 `demo-project`)。此外,您还需要准备一个在项目级别具有 `admin` 角色的用户(例如 `project-admin`)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 ## 内置角色 @@ -46,8 +46,6 @@ weight: 13200 1. 
以 `project-admin` 身份登录控制台。在**项目角色**中,点击一个角色(例如,`admin`)以查看角色详情。 - ![project-role-detail](/images/docs/zh-cn/project-administration/role-and-member-management/project-role-detail.png) - 2. 点击**授权用户**选项卡,查看所有被授予该角色的用户。 ## 创建项目角色 @@ -56,7 +54,7 @@ weight: 13200 2. 在**项目角色**中,点击**创建**并设置**角色标识符**(例如,`project-monitor`)。点击**编辑权限**继续。 -3. 在弹出的窗口中,权限归类在不同的**模块**下。在本示例中,为该角色选择**应用负载**中的**应用负载查看**,以及**监控告警**中的**告警消息查看**和**告警策略查看**。点击**确定**完成操作。 +3. 在弹出的窗口中,权限归类在不同的**功能模块**下。在本示例中,为该角色选择**应用负载**中的**应用负载查看**,以及**监控告警**中的**告警消息查看**和**告警策略查看**。点击**确定**完成操作。 {{< notice note >}} @@ -66,11 +64,10 @@ weight: 13200 4. 新创建的角色将在**项目角色**中列出,点击右侧的 以编辑该角色。 - ![project-role-list](/images/docs/zh-cn/project-administration/role-and-member-management/project-role-list.png) ## 邀请新成员 -1. 转到**项目设置**下的**项目成员**,点击**邀请成员**。 +1. 转到**项目设置**下的**项目成员**,点击**邀请**。 2. 点击右侧的 以邀请一名成员加入项目,并为其分配一个角色。 @@ -78,7 +75,5 @@ weight: 13200 4. 若要编辑现有成员的角色或将其从项目中移除,点击右侧的 并选择对应的操作。 - ![project-member-list](/images/docs/zh-cn/project-administration/role-and-member-management/project-member-list.png) - diff --git a/content/zh/docs/project-user-guide/alerting/alerting-message.md b/content/zh/docs/project-user-guide/alerting/alerting-message.md index 4f9881db5..0dbba710c 100644 --- a/content/zh/docs/project-user-guide/alerting/alerting-message.md +++ b/content/zh/docs/project-user-guide/alerting/alerting-message.md @@ -12,18 +12,16 @@ weight: 10720 ## 准备工作 * 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。 -* 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +* 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 * 您需要创建一个工作负载级别的告警策略并且已经触发该告警。有关更多信息,请参考[告警策略(工作负载级别)](../alerting-policy/)。 ## 查看告警消息 1. 使用 `project-regular` 帐户登录控制台并进入您的项目,导航到**监控告警**下的**告警消息**。 -2. 
在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示您在告警通知中定义的标题和消息。如需查看某一告警消息的详情,点击该告警策略的名称,然后在出现的页面上点击**告警消息**选项卡。 +2. 在**告警消息**页面,可以看到列表中的全部告警消息。第一列显示您在告警通知中定义的标题和消息。如需查看某一告警消息的详情,点击该告警策略的名称,然后在显示的页面中点击**告警历史**选项卡。 - ![alerting-messages](/images/docs/zh-cn/project-user-guide/alerting/alerting-messages/alerting-messages.png) - -3. 在**告警消息**选项卡,您可以看到告警级别、告警资源以及告警激活时间。 +3. 在**告警历史**选项卡,您可以看到告警级别、监控目标以及告警激活时间。 ## 查看通知 diff --git a/content/zh/docs/project-user-guide/alerting/alerting-policy.md b/content/zh/docs/project-user-guide/alerting/alerting-policy.md index a434a8470..f80f2508d 100644 --- a/content/zh/docs/project-user-guide/alerting/alerting-policy.md +++ b/content/zh/docs/project-user-guide/alerting/alerting-policy.md @@ -12,29 +12,27 @@ KubeSphere 支持针对节点和工作负载的告警策略。本教程演示如 - 您需要启用 [KubeSphere 告警系统](../../../pluggable-components/alerting/)。 - 若想接收告警通知,您需要预先配置一个[通知渠道](../../../cluster-administration/platform-settings/notification-management/configure-email/)。 -- 您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要确保项目中存在工作负载。如果项目中没有工作负载,请参见[部署并访问 Bookinfo](../../../quick-start/deploy-bookinfo-to-k8s/) 来创建示例应用。 ## 创建告警策略 1. 以 `project-regular` 身份登录控制台并访问您的项目。导航到**监控告警**下的**告警策略**,点击**创建**。 -2. 在出现的对话框中,提供如下基本信息。点击**下一步**继续。 +2. 在弹出的对话框中,提供如下基本信息。点击**下一步**继续。 - **名称**:使用简明名称作为其唯一标识符,例如 `alert-demo`。 - **别名**:帮助您更好地识别告警策略。 - **描述信息**:对该告警策略的简要介绍。 - - **告警持续时间(分钟)**:若在告警持续时间内的任意时间点均满足为告警策略定义的条件,告警将会触发。 + - **阈值时间(分钟)**:告警规则中设置的情形持续时间达到该阈值后,告警策略将变为触发中状态。 - **告警级别**:提供的值包括**一般告警**、**重要告警**和**危险告警**,代表告警的严重程度。 -3. 在**告警规则**选项卡,您可以使用规则模板或创建自定义规则。若想使用模板,请填写以下字段。 +3. 
在**规则设置**选项卡,您可以使用规则模板或创建自定义规则。若想使用模板,请填写以下字段。 - - **资源类型**:选择想要监控的资源类型,例如**部署**,**有状态副本集**或**守护进程集**。 + - **资源类型**:选择想要监控的资源类型,例如**部署**、**有状态副本集**或**守护进程集**。 - **监控目标**:取决于您所选择的资源类型,目标可能有所不同。如果项目中没有工作负载,则无法看到任何监控目标。 - **告警规则**:为告警策略定义规则。这些规则基于 Prometheus 表达式,满足条件时将会触发告警。您可以对 CPU、内存等对象进行监控。 - ![rule-template1](/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/rule-template1.png) - {{< notice note >}} 您可以在**监控指标**字段输入表达式(支持自动补全),以使用 PromQL 创建自定义规则。有关更多信息,请参见 [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/)。 @@ -43,24 +41,20 @@ KubeSphere 支持针对节点和工作负载的告警策略。本教程演示如 点击**下一步**继续。 -4. 在**通知设置**选项卡,输入想要在包含在通知中的告警标题和消息,然后点击**创建**。 +4. 在**消息设置**选项卡,输入想要在包含在通知中的告警标题和消息,然后点击**创建**。 -5. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到告警持续时间后,将变为**触发中**状态。 +5. 告警策略刚创建后将显示为**未触发**状态;一旦满足规则表达式中的条件,则会首先达到**待触发**状态;满足告警条件的时间达到阈值时间后,将变为**触发中**状态。 ## 编辑告警策略 若要在创建后编辑告警策略,点击**告警策略**页面右侧的 。 -1. 点击下拉菜单中的**编辑**,根据与创建时相同的步骤来编辑告警策略。点击**通知设置**页面的**更新**保存更改。 - - ![alert-policy-created](/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alert-policy-created.png) +1. 点击下拉菜单中的**编辑**,按照创建时相同的步骤来编辑告警策略。点击**消息设置**页面的**确定**保存更改。 2. 
点击下拉菜单中的**删除**来删除告警策略。 ## 查看告警策略 -在**告警策略**页面,点击任一告警策略来查看其详情,包括告警规则和告警消息。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 +在**告警策略**页面,点击任一告警策略来查看其详情,包括告警规则和告警历史。您还可以看到创建告警策略时基于所使用模板的告警规则表达式。 -在**监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**通知设置**显示您在通知中设置的自定义消息。 - -![alerting-policy-detail](/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png) \ No newline at end of file +在**告警监控**下,**告警监控**图显示一段时间内的实际资源使用情况或使用量。**告警消息**显示您在通知中设置的自定义消息。 diff --git a/content/zh/docs/project-user-guide/application-workloads/container-image-settings.md b/content/zh/docs/project-user-guide/application-workloads/container-image-settings.md index 4f743a690..fdc2e36df 100644 --- a/content/zh/docs/project-user-guide/application-workloads/container-image-settings.md +++ b/content/zh/docs/project-user-guide/application-workloads/container-image-settings.md @@ -1,36 +1,39 @@ --- -title: "容器镜像设置" +title: "容器组设置" keywords: 'KubeSphere, Kubernetes, 镜像, 工作负载, 设置, 容器' -description: '在为工作负载设置容器镜像时,详细了解仪表板上的不同属性。' +description: '在为工作负载设置容器组时,详细了解仪表板上的不同属性。' weight: 10280 --- -创建部署 (Deployment)、有状态副本集 (StatefulSet) 或者守护进程集 (DaemonSet) 时,您需要指定一个容器镜像。同时,KubeSphere 向用户提供多种选项,用于自定义工作负载配置,例如健康检查探针、环境变量和启动命令。本页内容详细说明了**容器镜像**中的不同属性。 +创建部署 (Deployment)、有状态副本集 (StatefulSet) 或者守护进程集 (DaemonSet) 时,您需要指定一个容器组。同时,KubeSphere 向用户提供多种选项,用于自定义工作负载配置,例如健康检查探针、环境变量和启动命令。本页内容详细说明了**容器组设置**中的不同属性。 {{< notice tip >}} -您可以在右上角启用**编辑模式**,查看仪表板上的属性对应到清单文件(YAML 格式)中的值。 +您可以在右上角启用**编辑 YAML**,查看仪表板上的属性对应到清单文件(YAML 格式)中的值。 {{}} -## 容器镜像 +## 容器组设置 ### 容器组副本数量 -点击 图标设置 Pod(即容器组)副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。该选项对守护进程集不可用。 +点击 图标设置容器组副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。该选项对守护进程集不可用。 -![Pod 副本](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/pod-replicas.PNG) +如果您在多集群项目中创建部署,请在**副本调度模式**下选择一个副本调度模式: -### 添加容器镜像 +- **指定副本数量**:选择集群并设置每个集群的容器组副本数。 +- **指定权重**:选择集群,在**副本总数**中设置容器组副本总数,并指定每个集群的权重。容器组副本将根据权重成比例地调度到每个集群。若要在创建部署后修改权重,请点击部署名称前往其详情页,在**资源状态**页签下的**权重**区域修改权重。 
-点击**添加容器镜像**后,您会看到如下图所示的界面。 +如果您在多集群项目中创建有状态副本集,请在**容器组副本数量**下选择集群并设置每个集群的容器组副本数。 -![添加容器镜像](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/add-container-explain.PNG) +### 添加容器 + +点击**添加容器**来添加容器。 #### 镜像搜索栏 -您可以点击右边的 ,从列表中选择一个镜像,或者输入镜像名称进行搜索。KubeSphere 提供 Docker Hub 的镜像以及您的私有镜像仓库的镜像。如果想使用私有镜像仓库,您需要先在**配置中心**下的**密钥**中创建镜像仓库密钥。 +您可以点击右边的 ,从列表中选择一个镜像,或者输入镜像名称进行搜索。KubeSphere 提供 Docker Hub 的镜像以及您的私有镜像仓库的镜像。如果想使用私有镜像仓库,您需要先在**配置**下的**保密字典**中创建镜像仓库保密字典。 {{< notice note >}} @@ -57,22 +60,22 @@ weight: 10280 - CPU 预留显示在清单文件中的 `.spec.containers[].resources.requests.cpu`,实际用量可以超过 CPU 预留。 - 内存预留显示在清单文件中的 `.spec.containers[].resources.requests.memory`。实际用量可以超过内存预留,但节点内存不足时可能会清理容器。 -![资源预留和限制](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/resource-request-limit.PNG) - #### 资源限制 -您可以指定应用程序能使用的资源上限,包括 CPU 和内存,防止占用过多资源。 +您可以指定应用程序能使用的资源上限,包括 CPU、内存、GPU,防止占用过多资源。 - CPU 限制显示在清单文件中的 `.spec.containers[].resources.limits.cpu`。实际用量可以短时间超过 CPU 限制,容器不会被停止。 - 内存限制显示在清单文件中的 `.spec.containers[].resources.limits.memory`。实际用量不能超过内存限制,如果超过了,容器可能会被停止或者被调度到其他资源充足的机器上。 {{< notice note >}} -CPU 资源以 CPU 单位计量,即 KubeSphere 中的 **Core**。内存资源以字节计量,即 KubeSphere 中的 **Mi**。 +CPU 资源以 CPU 单位计量,即 KubeSphere 中的 **Core**。内存资源以字节计量,即 KubeSphere 中的 **MiB**。 {{}} -#### **端口/服务设置** +要设置 **GPU 类型**,请在下拉列表中选择一个 GPU 类型,默认为 `nvidia.com/gpu`。**GPU 限制**默认为不限制。 + +#### **端口设置** 您需要为容器设置访问协议和端口信息。请点击**使用默认端口**以自动填充默认设置。 @@ -80,70 +83,58 @@ CPU 资源以 CPU 单位计量,即 KubeSphere 中的 **Core**。内存资源 该值显示在 `imagePullPolicy` 字段。在仪表板上,您可以从下拉列表的以下三个选项中选择一个。 -![镜像拉取策略](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/image-pull-policy.PNG) +- **优先使用本地镜像**:只有本地不存在镜像时才会拉取镜像。 -- **优先使用本地镜像 (ifNotPresent)**:只有本地不存在镜像时才会拉取镜像。 +- **每次都拉取镜像**:只要启动容器组就会拉取镜像。 -- **尝试重新下载镜像 (Always)**:只要启动 Pod 就会拉取镜像。 - -- **仅使用本地镜像 (Never)**:无论镜像是否存在都不会拉取镜像。 +- **仅使用本地镜像**:无论镜像是否存在都不会拉取镜像。 {{< notice tip>}} -- 默认值是 `IfNotPresent`,但标记为 `:latest` 的镜像的默认值是 
`Always`。 +- 默认值是 **优先使用本地镜像**,但标记为 `:latest` 的镜像的默认值是 **每次都拉取镜像**。 - Docker 会在拉取镜像时进行检查,如果 MD5 值没有变,则不会拉取镜像。 - 在生产环境中应尽量避免使用 `:latest`,在开发环境中使用 `:latest` 会自动拉取最新的镜像。 {{< /notice >}} -#### **健康检查器** +#### **健康检查** -支持**存活**、**就绪**和**启动**检查。 +支持存活检查、就绪检查和启动检查。 -![容器健康检查](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/container-health-check.PNG) +- **存活检查**:使用存活探针检测容器是否在运行,该参数显示在 `livenessProbe` 字段。 -- **容器存活检查**:使用存活探针检测容器是否在运行,该参数显示在 `livenessProbe` 字段。 +- **就绪检查**:使用就绪探针检测容器是否准备好处理请求,该参数显示在 `readinessProbe` 字段。 -- **容器就绪检查**:使用就绪探针检测容器是否准备好处理请求,该参数显示在 `readinessProbe` 字段。 +- **启动检查**:使用启动探针检测容器应用程序是否已经启动,该参数显示在 `startupProbe` 字段。 -- **容器启动检查**:使用启动探针检测容器应用程序是否已经启动,该参数显示在 `startupProbe` 字段。 +存活、就绪和启动检查包含以下配置: -存活、就绪和启动检查都包含以下配置: +- **HTTP 请求**:在容器 IP 地址的指定端口和路径上执行 HTTP `Get` 请求,如果响应状态码大于等于 200 且小于 400,则认为诊断成功。支持的参数包括: -- **HTTPGetAction(HTTP 请求检查)**:在容器 IP 地址的指定端口和路径上执行 HTTP `Get` 请求,如果响应状态码大于等于 200 且小于 400,则认为诊断成功。支持的参数包括: + - **路径**:HTTP 或 HTTPS,由 `scheme` 指定;访问 HTTP 服务器的路径,由 `path` 指定;访问端口或端口名由容器暴露,端口号必须在 1 和 65535 之间,该值由 `port` 指定。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 - ![HTTP 请求检查](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/http-request-check.PNG) - - - **方案**:HTTP 或 HTTPS,由 `scheme` 指定。 - - **路径**:访问 HTTP 服务器的路径,由 `path` 指定。 - - **端口**:访问端口或端口名由容器暴露。端口号必须在 1 和 65535 之间。该值由 `port` 指定。 - - **初始延迟**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 - - **执行探测频率**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 - - **超时时间**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 - - **健康阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 - - 
**不健康阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 - -- **TCPSocketAction(TCP 端口检查)**:在容器 IP 地址的指定端口上执行 TCP 检查。如果该端口打开,则认为诊断成功。支持的参数包括: - - ![TCP 端口检查](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/tcp-port-check.PNG) +- **TCP 端口**:在容器 IP 地址的指定端口上执行 TCP 检查。如果该端口打开,则认为诊断成功。支持的参数包括: - **端口**:访问端口或端口名由容器暴露。端口号必须在 1 和 65535 之间。该值由 `port` 指定。 - - **初始延迟**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 - - **执行探测频率**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 - - **超时时间**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 - - **健康阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 - - **不健康阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 -- **ExecAction(执行命令检查)**:在容器中执行指定命令。如果命令退出时返回代码为 0,则认为诊断成功。支持的参数包括: - - ![执行命令检查](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/exec-command-check.PNG) +- **命令**:在容器中执行指定命令。如果命令退出时返回代码为 0,则认为诊断成功。支持的参数包括: - **命令**:用于检测容器健康状态的检测命令,由 `exec.command` 指定。 - - **初始延迟**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 - - **执行探测频率**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 - - **超时时间**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 - - **健康阈值**:探测失败后,视为探测成功的最小连续成功次数,由 `successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 - - **不健康阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 + - **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数,由 `initialDelaySeconds` 指定。默认为 0。 + - **检查间隔(s)**:探测频率(以秒为单位),由 `periodSeconds` 指定。默认为 10,最小值为 1。 + - **超时时间(s)**:探针超时的秒数,由 `timeoutSeconds` 指定。默认为 1,最小值为 1。 + - **成功阈值**:探测失败后,视为探测成功的最小连续成功次数,由 
`successThreshold` 指定。默认为 1,存活探针和启动探针的该值必须为 1。最小值为 1。 + - **失败阈值**:探测成功后,视为探测失败的最小连续失败次数,由 `failureThreshold` 指定。默认为 3,最小值为 1。 有关健康检查的更多信息,请访问[容器探针](https://kubernetes.io/zh/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)。 @@ -151,30 +142,24 @@ CPU 资源以 CPU 单位计量,即 KubeSphere 中的 **Core**。内存资源 默认情况下,容器会运行默认镜像命令。 -![启动命令](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/start-command.PNG) - -- **运行命令**对应清单文件中容器的 `command` 字段。 +- **命令**对应清单文件中容器的 `command` 字段。 - **参数**对应清单文件中容器的 `args` 字段。 有关该命令的更多信息,请访问[为容器设置启动时要执行的命令和参数](https://kubernetes.io/zh/docs/tasks/inject-data-application/define-command-argument-container/)。 #### **环境变量** -以键值对形式为 Pod 配置环境变量。 - -![环境变量](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/envi-var.PNG) +以键值对形式为容器组配置环境变量。 - 名称:环境变量的名称,由 `env.name` 指定。 - 值:变量引用的值,由 `env.value` 指定。 -- 类型:环境变量的类型,支持自定义、配置项、键以及变量或变量引用。 +- 点击**使用配置字典或保密字典**来使用现有的配置字典或保密字典。 -有关该命令的更多信息,请访问 [Pod 变量](https://kubernetes.io/zh/docs/tasks/inject-data-application/environment-variable-expose-pod-information/)。 +有关该命令的更多信息,请访问 [容器组变量](https://kubernetes.io/zh/docs/tasks/inject-data-application/environment-variable-expose-pod-information/)。 -#### **容器组 Security Context** +#### **容器安全上下文** -Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Security Context 的更多信息,请访问 [Pod 安全策略](https://kubernetes.io/zh/docs/concepts/policy/pod-security-policy/)。 - -![Security Context](/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/security-context.PNG) +安全上下文(Security Context)定义容器组或容器的特权和访问控制设置。有关安全上下文的更多信息,请访问 [容器组安全策略](https://kubernetes.io/zh/docs/concepts/policy/pod-security-policy/)。 #### **同步主机时区** @@ -182,7 +167,7 @@ Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Sec ## **更新策略** -### Pod 更新 +### 容器组更新 不同工作负载使用不同的更新策略。 @@ -190,15 +175,15 @@ Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Sec {{< tab "部署" >}} -`.spec.strategy` 字段指定用于用新 Pod 替换旧 Pod 的策略。`.spec.strategy.type` 可以是 `Recreate` 或 
`RollingUpdate`。默认值是 `RollingUpdate`。 +`.spec.strategy` 字段指定用于用新容器组替换旧容器组的策略。`.spec.strategy.type` 可以是 `Recreate` 或 `RollingUpdate`。默认值是 `RollingUpdate`。 - **滚动更新(推荐)** 滚动更新将逐步用新版本的实例替换旧版本的实例。升级过程中,流量会同时负载均衡分布到新老版本的实例上,因此服务不会中断。 -- **替换升级** +- **同时更新** - 替换升级会先删除现有的 Pod,再创建新的 Pod。请注意,升级过程中服务会中断。 + 替换升级会先删除现有的容器组,再创建新的容器组。请注意,升级过程中服务会中断。 有关升级策略的更多信息,请访问[部署的策略部分](https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/#strategy)。 @@ -206,15 +191,15 @@ Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Sec {{< tab "有状态副本集" >}} -**更新策略**下的下拉菜单显示在清单文件中有状态副本集的 `.spec.updateStrategy` 字段。您可以处理 Pod 容器、标签、资源预留或限制以及注解的更新。有两种策略: +**更新策略**下的下拉菜单显示在清单文件中有状态副本集的 `.spec.updateStrategy` 字段。您可以处理容器组容器、标签、资源预留或限制以及注解的更新。有两种策略: - **滚动更新(推荐)** - 如果 `.spec.template` 已更新,有状态副本集中的 Pod 将被自动删除,并创建新的 Pod 来替换。Pod 将按照反向顺序更新,依次删除和创建。前一个 Pod 更新完成并开始运行后,才会开始更新下一个新的 Pod。 + 如果 `.spec.template` 已更新,有状态副本集中的容器组将被自动删除,并创建新的容器组来替换。容器组将按照反向顺序更新,依次删除和创建。前一个容器组更新完成并开始运行后,才会开始更新下一个新的容器组。 - **删除容器组时更新** - 如果 `.spec.template` 已更新,有状态副本集中的 Pod 将不会自动更新。您需要手动删除旧的 Pod,控制器才会创建新的 Pod。 + 如果 `.spec.template` 已更新,有状态副本集中的容器组将不会自动更新。您需要手动删除旧的容器组,控制器才会创建新的容器组。 有关更新策略的更多信息,请访问[有状态副本集更新策略](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/#update-strategies)。 @@ -222,15 +207,15 @@ Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Sec {{< tab "守护进程集" >}} -**更新策略**下的下拉菜单显示在清单文件中守护进程集的 `.spec.updateStrategy` 字段。您可以处理 Pod 容器、标签、资源预留或限制以及注解的更新。有两种策略: +**更新策略**下的下拉菜单显示在清单文件中守护进程集的 `.spec.updateStrategy` 字段。您可以处理容器组容器、标签、资源预留或限制以及注解的更新。有两种策略: - **滚动更新(推荐)** - 如果 `.spec.template` 已更新,旧的守护进程集 Pod 将被终止,并以受控方式自动创建新的 Pod。整个更新过程中,每个节点上至多只有一个守护进程集的 Pod 运行。 + 如果 `.spec.template` 已更新,旧的守护进程集容器组将被终止,并以受控方式自动创建新的容器组。整个更新过程中,每个节点上至多只有一个守护进程集的容器组运行。 - **删除容器组时更新** - 如果 `.spec.template` 已更新,只有当您手动删除旧的守护进程集 Pod 时才会创建新的守护进程集 Pod。这与 1.5 或之前版本 Kubernetes 中的守护进程集的操作行为相同。 + 如果 `.spec.template` 已更新,只有当您手动删除旧的守护进程集容器组时才会创建新的守护进程集容器组。这与 1.5 或之前版本 Kubernetes 中的守护进程集的操作行为相同。 
有关更新策略的更多信息,请访问[守护进程集更新策略](https://kubernetes.io/zh/docs/tasks/manage-daemon/update-daemon-set/#daemonset-%E6%9B%B4%E6%96%B0%E7%AD%96%E7%95%A5)。 @@ -238,45 +223,46 @@ Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Sec {{}} -### 更新时 Pod 数量 +### 滚动更新设置 {{< tabs >}} {{< tab "部署" >}} -部署中的**更新时容器组数量**与有状态副本集中的不同。 +部署中的**滚动更新设置**与有状态副本集中的不同。 -- **容器组最大不可用数量**:升级过程中允许不可用的 Pod 的最大数量,由 `maxUnavailable` 指定。默认值是 25%。 -- **容器组最大超出数量**:可调度的超过期望数量的 Pod 的最大数量,由 `maxSurge` 指定。默认值是 25%。 +- **最大不可用容器组数量**:升级过程中允许不可用的容器组的最大数量,由 `maxUnavailable` 指定。默认值是 25%。 +- **最大多余容器组数量**:可调度的超过期望数量的容器组的最大数量,由 `maxSurge` 指定。默认值是 25%。 {{}} {{< tab "有状态副本集" >}} -如果您对更新进行分区,当更新有状态副本集的 Pod 配置时,所有序号大于等于该分区序号值的 Pod 都会被更新。该字段由 `.spec.updateStrategy.rollingUpdate.partition` 指定,默认值是 0。有关分区的更多信息,请访问[分区](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/#partitions)。 +**容器组副本分组序号**:如果您对更新进行分区,当更新有状态副本集的容器组配置时,所有序号大于等于该分区序号值的容器组都会被更新。该字段由 `.spec.updateStrategy.rollingUpdate.partition` 指定,默认值是 0。有关分区的更多信息,请访问[分区](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/#partitions)。 {{}} {{< tab "守护进程集" >}} -守护进程集中的**更新时容器组数量**与有状态副本集中的不同。 +守护进程集中的**滚动更新设置**与有状态副本集中的不同。 -- **容器组最大不可用数量**:升级过程中允许不可用的 Pod 的最大数量,由 `maxUnavailable` 指定。默认值是 20%。 -- **最小就绪时间**:新创建的守护进程集的 Pod 被视为可用之前的最少秒数,由 `minReadySeconds` 指定。默认值是 0。 +- **最大不可用容器组数量**:升级过程中允许不可用的容器组的最大数量,由 `maxUnavailable` 指定。默认值是 20%。 +- **容器组就绪最短运行时长(s)**:新创建的守护进程集的容器组被视为可用之前的最少秒数,由 `minReadySeconds` 指定。默认值是 0。 {{}} {{}} -### 容器组 Security Context +### 容器组安全上下文 -Security Context 定义 Pod 或容器的特权和访问控制设置。有关 Pod Security Context 的更多信息,请访问 [Pod 安全策略](https://kubernetes.io/zh/docs/concepts/policy/pod-security-policy/)。 +安全上下文(Security Context)定义容器组或容器的特权和访问控制设置。有关容器组安全上下文的更多信息,请访问[容器组安全策略](https://kubernetes.io/zh/docs/concepts/policy/pod-security-policy/)。 -### 部署模式 +### 容器组调度规则 -您可以选择不同的部署模式,切换 Pod 间亲和与 Pod 间反亲和。在 Kubernetes 中,Pod 间亲和由 `affinity` 字段下的 `podAffinity` 字段指定,而 Pod 间反亲和由 `affinity` 字段下的 `podAntiAffinity` 字段指定。在 KubeSphere 
中,`podAffinity` 和 `podAntiAffinity` 都设置为 `preferredDuringSchedulingIgnoredDuringExecution`。您可以在右上角启用**编辑模式**查看字段详情, +您可以选择不同的容器组调度规则,切换容器组间亲和与容器组间反亲和。在 Kubernetes 中,容器组间亲和由 `affinity` 字段下的 `podAffinity` 字段指定,而容器组间反亲和由 `affinity` 字段下的 `podAntiAffinity` 字段指定。在 KubeSphere 中,`podAffinity` 和 `podAntiAffinity` 都设置为 `preferredDuringSchedulingIgnoredDuringExecution`。您可以在右上角启用**编辑 YAML**查看字段详情, -- **容器组分散部署**代表反亲和性。 -- **容器组聚合部署**代表亲和性。 +- **分散调度**代表反亲和性。 +- **集中调度**代表亲和性。 +- **自定义规则**即按需添加自定义调度规则。 -有关亲和性和反亲和性的更多信息,请访问 [Pod 亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#pod-%E9%97%B4%E4%BA%B2%E5%92%8C%E4%B8%8E%E5%8F%8D%E4%BA%B2%E5%92%8C)。 +有关亲和性和反亲和性的更多信息,请访问 [容器组亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#pod-%E9%97%B4%E4%BA%B2%E5%92%8C%E4%B8%8E%E5%8F%8D%E4%BA%B2%E5%92%8C)。 diff --git a/content/zh/docs/project-user-guide/application-workloads/cronjobs.md b/content/zh/docs/project-user-guide/application-workloads/cronjobs.md index 802ded4dc..1e94a67c8 100644 --- a/content/zh/docs/project-user-guide/application-workloads/cronjobs.md +++ b/content/zh/docs/project-user-guide/application-workloads/cronjobs.md @@ -13,7 +13,7 @@ weight: 10260 ## 准备工作 -您需要创建一个企业空间、一个项目以及一个帐户 (`project-regular`)。必须邀请该帐户至该项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`)。必须邀请该用户至该项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建定时任务 @@ -21,13 +21,9 @@ weight: 10260 以 `project-regular` 身份登录控制台。转到项目的**任务**页面,然后在**定时任务**选项卡下点击**创建**。 -![定时任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list.png) - ### 步骤 2:输入基本信息 -您可以参考下图在每个字段中输入基本信息。完成操作后,点击**下一步**。 - -![基本信息](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-create-basic-info.png) +您可以参考下面的说明在每个字段中输入基本信息。完成操作后,点击**下一步**。 - **名称**:定时任务的名称,也是唯一标识符。 - 
**别名**:定时任务的别名,使资源易于识别。 @@ -40,45 +36,39 @@ weight: 10260 | 每周 | `0 0 * * 0` | | 每月 | `0 0 1 * *` | -- **高级设置(执行参数)**: +- **高级设置**: - - **启动 Job 的期限(秒)**:由清单文件中的 `.spec.startingDeadlineSeconds` 指定,此可选字段表示如果由于任何原因错过计划时间,定时任务启动所需的最大秒数。错过执行的定时任务将被计为失败。如果未指定此字段,则此定时任务没有启动期限。 - - **保留完成 Job 数**:由清单文件中的 `.spec.successfulJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行成功的次数,用于区分显式零和未指定这两种情况。默认值为 3。 - - **保留失败 Job 数**:由清单文件中的 `.spec.failedJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行失败的次数,用于区分显式零和未指定这两种情况。默认值为 1。 - - **并发策略**:由 `.spec.concurrencyPolicy` 指定,它表示如何处理任务的并发执行。有效值为: - - **Allow** (默认值):允许定时任务并发运行。 - - **Forbid**:禁止并发运行,如果前一个运行还没有完成,则跳过下一个运行。 - - **Replace**:取消当前正在运行的任务,用一个新的来替换。 + - **最大启动延后时间(s)**:由清单文件中的 `.spec.startingDeadlineSeconds` 指定,此可选字段表示如果由于任何原因错过计划时间,定时任务启动所需的最大秒数。错过执行的定时任务将被计为失败。如果未指定此字段,则此定时任务没有启动期限。 + - **成功任务保留数量**:由清单文件中的 `.spec.successfulJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行成功的次数,用于区分显式零和未指定这两种情况。默认值为 3。 + - **失败任务保留数量**:由清单文件中的 `.spec.failedJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行失败的次数,用于区分显式零和未指定这两种情况。默认值为 1。 + - **并发策略**:由 `.spec.concurrencyPolicy` 指定,它表示如何处理任务的并发执行: + - **同时运行任务** (默认值):允许定时任务并发运行。 + - **跳过新任务**:禁止并发运行,如果前一个运行还没有完成,则跳过下一个运行。 + - **跳过旧任务**:取消当前正在运行的任务,用一个新的来替换。 {{< notice note >}} -您可以在右上角开启**编辑模式**,查看此定时任务的 YAML 格式清单文件。 +您可以在右上角开启**编辑 YAML**,查看此定时任务的 YAML 格式清单文件。 {{}} ### 步骤 3:定时任务设置(可选) -请参考[任务](../jobs/#步骤-3任务设置可选)。 +请参考[任务](../jobs/#步骤-3策略设置可选)。 -### 步骤 4:设置镜像 +### 步骤 4:设置容器组 -1. 点击**容器镜像**下的**添加容器镜像**,在搜索栏中输入 `busybox`,然后按**回车**键。 - - ![输入 busybox](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/input-busybox.png) +1. 点击**容器**下的**添加容器镜像**,在搜索栏中输入 `busybox`,然后按**回车**键。 2. 向下滚动到**启动命令**然后在**参数**框中输入 `/bin/sh,-c,date; echo "KubeSphere!"`。 - ![启动命令](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/start-command.png) - 3. 
点击 **√** 完成镜像设置,然后点击**下一步**继续。 - ![完成镜像设置](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/finish-image.png) - {{< notice note >}} -- 此示例定时任务输出 `KubeSphere`。有关设置镜像的更多信息,请参见[容器镜像设置](../container-image-settings/)。 +- 此示例定时任务输出 `KubeSphere`。有关设置镜像的更多信息,请参见[容器组设置](../container-image-settings/)。 - 有关**重启策略**的更多信息,请参见[任务](../jobs/#步骤-4设置镜像)。 -- 您可以跳过本教程的**挂载存储**和**高级设置**。有关更多信息,请参见部署一文中的[挂载存储卷](../deployments/#步骤-4挂载存储卷)和[配置高级设置](../deployments/#步骤-5配置高级设置)。 +- 您可以跳过本教程的**存储卷设置**和**高级设置**。有关更多信息,请参见部署一文中的[挂载存储卷](../deployments/#步骤-4挂载存储卷)和[配置高级设置](../deployments/#步骤-5配置高级设置)。 {{}} @@ -86,23 +76,11 @@ weight: 10260 1. 在最后一步**高级设置**中,点击**创建**完成操作。如果创建成功,定时任务列表中将添加一个新条目。此外,您还可以在**任务**选项卡下查看任务。 - ![定时任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list-new.png) - - ![任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-list.png) - -2. 在**定时任务**选项卡下,点击此定时任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。由于**保留完成 Job 数**字段设置为 3,因此这里显示定时任务成功执行 3 次。 - - ![执行记录](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/execution-record.png) +2. 在**定时任务**选项卡下,点击此定时任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。由于**成功任务保留数量**字段设置为 3,因此这里显示定时任务成功执行 3 次。 3. 点击任意记录,您将转到该任务的详情页面。 - ![任务详情页面](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-detail-page.png) - -4. 在**资源状态**中,您可以检查 Pod 状态。点击右侧的 ,可以检查容器日志,如下所示,该日志显示预期输出。 - - ![容器日志-1](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-1.png) - - ![容器日志-2](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-2.png) +4. 
在**资源状态**中,您可以检查容器组状态。点击右侧的 ,然后点击 可以检查容器日志,如下所示,该日志显示预期输出。 ## 定时任务操作 @@ -110,7 +88,17 @@ weight: 10260 - **编辑信息**:编辑基本信息,但无法编辑该定时任务的`名称`。 - **暂停/启动**:暂停或启动该定时任务。暂停定时任务将告知控制器暂停后续执行任务,但已经启动的执行不受影响。 -- **编辑配置文件**:编辑该定时任务的 YAML 文件配置。 +- **编辑 YAML**:编辑该定时任务的 YAML 文件配置。 - **删除**:删除该定时任务,然后返回定时任务列表页面。 -![定时任务操作](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-action.png) \ No newline at end of file +### 任务记录 + +点击**任务记录**选项卡查看定时任务的执行记录。 + +### 元数据 + +点击**元数据**选项卡查看定时任务的标签和注解。 + +### 事件 + +点击**事件**选项卡查看定时任务的事件。 diff --git a/content/zh/docs/project-user-guide/application-workloads/daemonsets.md b/content/zh/docs/project-user-guide/application-workloads/daemonsets.md index 6f7588e18..1df14cc2a 100644 --- a/content/zh/docs/project-user-guide/application-workloads/daemonsets.md +++ b/content/zh/docs/project-user-guide/application-workloads/daemonsets.md @@ -7,7 +7,7 @@ linkTitle: "守护进程集" weight: 10230 --- -守护进程集管理多组 Pod 副本,确保所有(或某些)节点运行一个 Pod 的副本。集群添加节点时,守护进程集会根据需要自动将 Pod 添加到新节点。 +守护进程集管理多组容器组副本,确保所有(或某些)节点运行一个容器组的副本。集群添加节点时,守护进程集会根据需要自动将容器组添加到新节点。 有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/)。 @@ -21,7 +21,7 @@ weight: 10230 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建守护进程集 @@ -29,52 +29,40 @@ weight: 10230 以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,点击**守护进程集**选项卡下面的**创建**。 -![守护进程集](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets.png) - ### 步骤 2:输入基本信息 为该守护进程集指定一个名称(例如 `demo-daemonset`),点击**下一步**继续。 -![输入名称](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_1.png) +### 步骤 3:设置容器组 -### 步骤 3:设置镜像 - -1. 
点击**添加容器镜像**。 - - ![添加容器镜像](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_btn.png) +1. 点击**添加容器**。 2. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `fluentd` 然后按**回车键**。 - ![输入镜像名称](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_1.png) - {{< notice note >}} - 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 -- 如果想使用您的私有镜像仓库,您应该先通过**配置中心**下面的**密钥**[创建镜像仓库密钥](../../../project-user-guide/configuration/image-registry/)。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 {{}} 3. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 - ![资源请求和限制](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonset-request-limit.png) - 4. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 5. 在下拉菜单中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 -6. 对于其他设置(**健康检查器**、**启动命令**、**环境变量**、**容器 Security Context** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 +6. 对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 7. 在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../../../project-user-guide/application-workloads/container-image-settings/#更新策略)。 -8. 选择部署模式。有关更多信息,请参见[部署模式](../../../project-user-guide/application-workloads/container-image-settings/#部署模式)。 +8. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 -9. 完成容器镜像设置后,点击**下一步**继续。 +9. 
完成容器组设置后,点击**下一步**继续。 ### 步骤 4:挂载存储卷 -您可以直接添加存储卷或者挂载 ConfigMap 或密钥,或者直接点击**下一步**跳过该步骤。有关存储卷的更多信息,请访问[存储卷](../../../project-user-guide/storage/volumes/#挂载存储卷)。 - -![挂载存储](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_3.png) +您可以直接添加存储卷或者挂载配置字典或保密字典,或者直接点击**下一步**跳过该步骤。有关存储卷的更多信息,请访问[存储卷](../../../project-user-guide/storage/volumes/#挂载存储卷)。 {{< notice note >}} @@ -86,8 +74,6 @@ weight: 10230 您可以在该部分添加元数据。完成操作后,点击**创建**完成创建守护进程集的整个流程。 -![高级设置](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_4.png) - - **添加元数据** 为资源进行额外的元数据设置,例如**标签**和**注解**。 @@ -96,62 +82,46 @@ weight: 10230 ### 详情页面 -1. 守护进程集创建后会显示在下方的列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的守护进程集。 +1. 守护进程集创建后会显示在列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的守护进程集。 - ![守护进程集列表](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_list.png) - - - **编辑**:查看并编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该守护进程集。 + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该守护进程集。 - **删除**:删除该守护进程集。 2. 点击守护进程集名称可以进入它的详情页面。 - ![详情页面](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail.png) - 3. 点击**更多操作**,显示您可以对该守护进程集进行的操作。 - ![更多操作](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_operation_btn.png) - - - **版本回退**:选择要回退的版本。 - - **编辑配置模板**:配置更新策略、容器和存储卷。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该守护进程集。 + - **回退**:选择要回退的版本。 + - **编辑设置**:配置更新策略、容器和存储卷。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该守护进程集。 - **删除**:删除该守护进程集并返回守护进程集列表页面。 -4. 点击**资源状态**选项卡,查看该守护进程集的端口和 Pod 信息。 +4. 
点击**资源状态**选项卡,查看该守护进程集的端口和容器组信息。 - ![资源状态](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_state.png) + - **副本运行状态**:您无法更改守护进程集的容器组副本数量。 + - **容器组** - - **副本运行状态**:您无法更改守护进程集的 Pod 副本数量。 - - **Pod 详情** - - ![Pod 详情](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_pod.png) - - - Pod 列表中显示了 Pod 详情(运行状态、节点、Pod IP 以及资源使用情况)。 - - 您可以点击 Pod 条目查看容器信息。 + - 容器组列表中显示了容器组详情(运行状态、节点、容器组 IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 - 点击容器日志图标查看容器的输出日志。 - - 您可以点击 Pod 名称查看 Pod 详情页面。 + - 您可以点击容器组名称查看容器组详情页面。 ### 版本记录 -修改工作负载的资源模板后,会生成一个新的日志并重新调度 Pod 进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新部署。 +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新创建。 ### 元数据 点击**元数据**选项卡以查看守护进程集的标签和注解。 -![daemonsets](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_metadata.png) - ### 监控 1. 点击**监控**选项卡以查看 CPU 使用量、内存使用量、网络流入速率和网络流出速率。 - ![daemonsets](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_monitoring.png) - -2. 点击右上角的下拉菜单以自定义时间范围和时间间隔。 - - ![daemonsets_time_range](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_time_range.png) +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 3. 
点击右上角的 / 以开始或停止自动刷新数据。 @@ -161,10 +131,6 @@ weight: 10230 点击**环境变量**选项卡以查看守护进程集的环境变量。 -![daemonsets](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_env_variable.png) - ### 事件 -点击**事件**以查看守护进程集的事件。 - -![daemonsets](/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_events.png) \ No newline at end of file +点击**事件**以查看守护进程集的事件。 \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/application-workloads/deployments.md b/content/zh/docs/project-user-guide/application-workloads/deployments.md index 8c7ca824c..a17b66ccd 100644 --- a/content/zh/docs/project-user-guide/application-workloads/deployments.md +++ b/content/zh/docs/project-user-guide/application-workloads/deployments.md @@ -7,13 +7,13 @@ linkTitle: "部署" weight: 10210 --- -部署控制器为 Pod 和副本集提供声明式升级。您可以在部署对象中描述一个期望状态,部署控制器会以受控速率将实际状态变更为期望状态。一个部署运行着应用程序的几个副本,它会自动替换宕机或故障的实例。因此,部署能够确保应用实例可用,处理用户请求。 +部署控制器为容器组和副本集提供声明式升级。您可以在部署对象中描述一个期望状态,部署控制器会以受控速率将实际状态变更为期望状态。一个部署运行着应用程序的几个副本,它会自动替换宕机或故障的实例。因此,部署能够确保应用实例可用,处理用户请求。 有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/)。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建部署 @@ -21,60 +21,46 @@ weight: 10210 以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,点击**部署**选项卡下面的**创建**。 -![部署](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments.PNG) - ### 步骤 2:输入基本信息 为该部署指定一个名称(例如 `demo-deployment`),点击**下一步**继续。 -![输入部署名称](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-name.PNG) +### 步骤 3:设置容器组 -### 步骤 3:设置镜像 - -1. 设置镜像前,请点击**容器组副本数量**中的 来定义 Pod 的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 +1. 
设置镜像前,请点击**容器组副本数量**中的 来定义容器组的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 {{< notice tip >}} -您可以启用右上角的**编辑模式**,查看 YAML 格式的部署清单文件。KubeSphere 使您可以直接编辑清单文件创建部署,或者您可以按照下列步骤使用仪表板创建部署。 +您可以启用右上角的**编辑 YAML**,查看 YAML 格式的部署清单文件。KubeSphere 使您可以直接编辑清单文件创建部署,或者您可以按照下列步骤使用仪表板创建部署。 {{}} - ![设置副本数量](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/replica-number.PNG) - -2. 点击**添加容器镜像**。 - - ![添加容器镜像](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/add-container-image.PNG) +2. 点击**添加容器**。 3. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `nginx` 然后按**回车键**。 - ![输入镜像名称](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/input-image-name.PNG) - {{< notice note >}} - 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 -- 如果想使用您的私有镜像仓库,您应该先通过**配置中心**下面的**密钥**[创建镜像仓库密钥](../../../project-user-guide/configuration/image-registry/)。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 {{}} 4. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 - ![资源设置](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-setting.PNG) - 5. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 -6. 在下拉菜单中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 +6. 在下拉列表中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 -7. 对于其他设置(**健康检查器**、**启动命令**、**环境变量**、**容器 Security Context** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 +7. 
对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 8. 在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../../../project-user-guide/application-workloads/container-image-settings/#更新策略)。 -9. 选择部署模式。有关更多信息,请参见[部署模式](../../../project-user-guide/application-workloads/container-image-settings/#部署模式)。 +9. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 -10. 完成容器镜像设置后,点击**下一步**继续。 +10. 完成容器组设置后,点击**下一步**继续。 ### 步骤 4:挂载存储卷 -您可以直接添加存储卷或者挂载 ConfigMap 或密钥,或者直接点击**下一步**跳过该步骤。有关存储卷的更多信息,请访问[存储卷](../../../project-user-guide/storage/volumes/#挂载存储卷)。 - -![挂载存储](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/mount-volumes.PNG) +您可以直接添加存储卷或者挂载配置字典或保密字典,或者直接点击**下一步**跳过该步骤。有关存储卷的更多信息,请访问[存储卷](../../../project-user-guide/storage/volumes/#挂载存储卷)。 {{< notice note >}} @@ -86,11 +72,9 @@ weight: 10210 您可以在该部分设置节点调度策略并添加元数据。完成操作后,点击**创建**完成创建部署的整个流程。 -![高级设置](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/advanced-settings.PNG) +- **选择节点** -- **设置节点调度策略** - - 您可以让 Pod 副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 + 分配容器组副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 - **添加元数据** @@ -100,63 +84,47 @@ weight: 10210 ### 详情页面 -1. 部署创建后会显示在下方的列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的部署。 +1. 部署创建后会显示在列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的部署。 - ![部署列表](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-listed.PNG) - - - **编辑**:查看并编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该部署。 + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该部署。 - **删除**:删除该部署。 2. 点击部署名称可以进入它的详情页面。 - ![部署详情](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-detail-page.PNG) - 3. 
点击**更多操作**,显示您可以对该部署进行的操作。 - ![更多操作](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/more-actions.PNG) - - - **版本回退**:选择要回退的版本。 - - **弹性伸缩**:根据 CPU 和内存使用情况自动伸缩副本。如果 CPU 和内存都已指定,则在满足任一条件时会添加或删除副本。 - - **编辑配置模板**:配置更新策略、容器和存储卷。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该部署。 + - **回退**:选择要回退的版本。 + - **编辑自动伸缩**:根据 CPU 和内存使用情况自动伸缩副本。如果 CPU 和内存都已指定,则在满足任一条件时会添加或删除副本。 + - **编辑设置**:配置更新策略、容器和存储卷。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该部署。 - **删除**:删除该部署并返回部署列表页面。 -4. 点击**资源状态**选项卡,查看该部署的端口和 Pod 信息。 +4. 点击**资源状态**选项卡,查看该部署的端口和容器组信息。 - ![资源状态](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-status.PNG) + - **副本运行状态**:点击 来增加或减少容器组副本数量。 + - **容器组** - - **副本运行状态**:点击 来增加或减少 Pod 副本数量。 - - **Pod 详情** - - ![Pod 详情](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/pod-details.PNG) - - - Pod 列表中显示了 Pod 详情(运行状态、节点、Pod IP 以及资源使用情况)。 - - 您可以点击 Pod 条目查看容器信息。 + - 容器组列表中显示了容器组详情(运行状态、节点、容器组 IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 - 点击容器日志图标查看容器的输出日志。 - - 您可以点击 Pod 名称查看 Pod 详情页面。 + - 您可以点击容器组名称查看容器组详情页面。 ### 版本记录 -修改工作负载的资源模板后,会生成一个新的日志并重新调度 Pod 进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新部署。 +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新部署。 ### 元数据 点击**元数据**选项卡以查看部署的标签和注解。 -![deployments](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-matadata.png) - ### 监控 1. 点击**监控**选项卡以查看部署的 CPU 使用量、内存使用量、网络流出速率和网络流入速率。 - ![deployments-monitoring](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-monitoring.png) - -2. 点击右上角的下拉菜单以自定义时间范围和时间间隔。 - - ![deployments](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-time-range.png) +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 3. 
点击右上角的 / 以开始或停止数据自动刷新。 @@ -166,10 +134,6 @@ weight: 10210 点击**环境变量**选项卡以查看部署的环境变量。 -![deployments](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-env-variables.png) - ### 事件 -点击**事件**选项卡以查看部署的事件。 - -![deployments](/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-events.png) \ No newline at end of file +点击**事件**选项卡以查看部署的事件。 \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md b/content/zh/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md index ac1f11d76..f90cf35e1 100755 --- a/content/zh/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md +++ b/content/zh/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling.md @@ -1,21 +1,21 @@ --- -title: "Pod 弹性伸缩" -keywords: "Pod, 弹性伸缩, 弹性伸缩程序" -description: "如何在 KubeSphere 上配置 Pod 弹性伸缩." +title: "容器组弹性伸缩" +keywords: "容器组, 弹性伸缩, 弹性伸缩程序" +description: "如何在 KubeSphere 上配置容器组弹性伸缩." 
weight: 10290 --- -本文档描述了如何在 KubeSphere 上配置 Pod 弹性伸缩 (HPA)。 +本文档描述了如何在 KubeSphere 上配置容器组弹性伸缩 (HPA)。 -HPA 功能会自动调整 Pod 的数量,将 Pod 的平均资源使用(CPU 和内存)保持在预设值附近。有关 HPA 功能的详细情况,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/)。 +HPA 功能会自动调整容器组的数量,将容器组的平均资源使用(CPU 和内存)保持在预设值附近。有关 HPA 功能的详细情况,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/)。 本文档使用基于 CPU 使用率的 HPA 作为示例,基于内存使用量的 HPA 操作与其相似。 ## 准备工作 - 您需要[启用 Metrics Server](../../../pluggable-components/metrics-server/)。 -- 您需要创建一个企业空间、一个项目以及一个帐户(例如,`project-regular`)。`project-regular` 必须被邀请至此项目中,并被赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目以及一个用户(例如,`project-regular`)。`project-regular` 必须被邀请至此项目中,并被赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建服务 @@ -23,105 +23,71 @@ HPA 功能会自动调整 Pod 的数量,将 Pod 的平均资源使用(CPU 2. 在左侧导航栏中选择**应用负载**下的**服务**,然后点击右侧的**创建**。 - ![create-service](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png) - 3. 在**创建服务**对话框中,点击**无状态服务**。 - ![stateless-service](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png) - 4. 设置服务名称(例如,`hpa`),然后点击**下一步**。 - ![service-name](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png) - -5. 点击**添加容器镜像**,将**镜像**设置为 `mirrorgooglecontainers/hpa-example` 并点击**使用默认端口**。 - - ![add-container-image](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png) +5. 点击**添加容器**,将**镜像**设置为 `mirrorgooglecontainers/hpa-example` 并点击**使用默认端口**。 6. 
为每个容器设置 CPU 请求(例如,0.15 core),点击 **√**,然后点击**下一步**。 {{< notice note >}} - * 若要使用基于 CPU 使用率的 HPA,就必须为每个容器设置 CPU 请求,即为每个容器预留的最低 CPU 资源(有关详细信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/))。HPA 功能会将 Pod 平均 CPU 使用率与 Pod 平均 CPU 请求的目标比率进行比较。 + * 若要使用基于 CPU 使用率的 HPA,就必须为每个容器设置 CPU 请求,即为每个容器预留的最低 CPU 资源(有关详细信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/tasks/run-application/horizontal-pod-autoscale/))。HPA 功能会将容器组平均 CPU 使用率与容器组平均 CPU 请求的目标比率进行比较。 * 若要使用基于内存使用量的 HPA,则不需要配置内存请求。 {{}} - ![cpu-request](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png) - -7. 点击**挂载存储**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 +7. 点击**存储卷设置**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 ## 配置 HPA 1. 左侧导航栏上选择**工作负载**中的**部署**,然后点击右侧的 HPA 部署(例如,hpa-v1)。 - ![hpa-deployment](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png) +2. 点击**更多操作**,从下拉菜单中选择**编辑自动伸缩**。 -2. 点击**更多操作**,从下拉菜单中选择**弹性伸缩**。 +3. 在**自动伸缩**对话框中,配置 HPA 参数,然后点击**确定**。 - ![horizontal-pod-autoscaling](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png) + * **目标 CPU 用量(%)**:容器组平均 CPU 请求的目标比率。 + * **目标内存用量(MiB)**:以 MiB 为单位的容器组平均内存目标使用量。 + * **最小副本数**:容器组的最小数量。 + * **最大副本数**:容器组的最大数量。 -3. 
在**弹性伸缩**对话框中,配置 HPA 参数,然后点击**确定**。 - - * **CPU 目标使用率**:Pod 平均 CPU 请求的目标比率。 - * **内存目标使用量**:以 MiB 为单位的 Pod 平均内存目标使用量。 - * **最小副本数**:Pod 的最小数量。 - * **最大副本数**:Pod 的最大数量。 - - 在示例中,**CPU 目标使用率**设置为 `60`,**最小副本数**设置为 `1`,**最大副本数**设置为 `10`。 + 在示例中,**目标 CPU 用量(%)**设置为 `60`,**最小副本数**设置为 `1`,**最大副本数**设置为 `10`。 {{< notice note >}} - 当 Pod 的数量达到最大值时,请确保集群可以为所有 Pod 提供足够的资源。否则,一些 Pod 将创建失败。 + 当容器组的数量达到最大值时,请确保集群可以为所有容器组提供足够的资源。否则,一些容器组将创建失败。 {{}} - ![hpa-parameters](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png) - ## 验证 HPA -本节使用将请求发送到 HPA 服务的部署,以验证 HPA 是否会自动调整 Pod 的数量来满足资源使用目标。 +本节使用将请求发送到 HPA 服务的部署,以验证 HPA 是否会自动调整容器组的数量来满足资源使用目标。 ### 创建负载生成器部署 1. 在左侧导航栏中选择**应用负载**中的**工作负载**,然后点击右侧的**创建**。 - ![create-deployment](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png) - 2. 在**创建部署**对话框中,设置部署名称(例如,`load-generator`),然后点击**下一步**。 - ![deployment-name](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png) +3. 点击**添加容器**,将**镜像**设置为 `busybox`。 -3. 点击**添加容器镜像**,将**镜像**设置为 `busybox`。 - - ![busybox](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png) - -4. 在对话框中向下滚动,选择**启动命令**,然后将**运行命令**设置为 `sh,-c`,将**参数**设置为 `while true; do wget -q -O- http://..svc.cluster.local; done`(例如,`while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`)。 - - ![start-command](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png) +4. 在对话框中向下滚动,选择**启动命令**,然后将**命令**设置为 `sh,-c`,将**参数**设置为 `while true; do wget -q -O- http://..svc.cluster.local; done`(例如,`while true; do wget -q -O- http://hpa.demo-project.svc.cluster.local; done`)。 5. 点击 **√**,然后点击**下一步**。 -6. 点击**挂载存储**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 +6. 点击**存储卷设置**选项卡上的**下一步**,然后点击**高级设置**选项卡上的**创建**。 ### 查看 HPA 部署状态 -1. 
负载生成器部署创建好后,在左侧导航栏中选择**应用负载**下的**工作负载**,然后点击右侧的 HPA 部署(例如,hpa-v1)。 +1. 负载生成器部署创建好后,在左侧导航栏中选择**应用负载**下的**工作负载**,然后点击右侧的 HPA 部署(例如,hpa-v1)。页面中显示的容器组的数量会自动增加以满足资源使用目标。 - Pod 的数量会自动增加以满足资源使用目标。 - - ![target-cpu-utilization](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png) - - ![pods-increase](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png) - -2. 在左侧导航栏选择**应用负载**中的**工作负载**,点击负载生成器部署(例如,load-generator-v1)右侧的 ,从下拉菜单中选择**删除**。负载生成器部署删除后,再次检查 HPA 部署的状态。 - - Pod 的数量会减少到最小值。 - - ![pods-decrease](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png) +2. 在左侧导航栏选择**应用负载**中的**工作负载**,点击负载生成器部署(例如,load-generator-v1)右侧的 ,从下拉菜单中选择**删除**。负载生成器部署删除后,再次检查 HPA 部署的状态。容器组的数量会减少到最小值。 {{< notice note >}} -系统可能需要一些时间来调整 Pod 的数量以及收集数据。 +系统可能需要一些时间来调整容器组的数量以及收集数据。 {{}} @@ -133,6 +99,5 @@ HPA 功能会自动调整 Pod 的数量,将 Pod 的平均资源使用(CPU 1. 在左侧导航栏选择**应用负载**中的**工作负载**,点击右侧的 HPA 部署(例如,hpa-v1)。 -2. 点击**弹性伸缩**右侧的 ,从下拉菜单中选择**取消**。 +2. 
点击**自动伸缩**右侧的 ,从下拉菜单中选择**取消**。 - ![cancel-hpa](/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png) diff --git a/content/zh/docs/project-user-guide/application-workloads/jobs.md b/content/zh/docs/project-user-guide/application-workloads/jobs.md index f4c9550c9..35f96e551 100644 --- a/content/zh/docs/project-user-guide/application-workloads/jobs.md +++ b/content/zh/docs/project-user-guide/application-workloads/jobs.md @@ -1,21 +1,21 @@ --- title: 任务 -keywords: "KubeSphere, Kubernetes, docker, 任务" +keywords: "KubeSphere, Kubernetes, Docker, 任务" description: "了解任务的基本概念以及如何在 KubeSphere 中创建任务。" linkTitle: "任务" weight: 10250 --- -任务会创建一个或者多个 Pod,并确保指定数量的 Pod 成功结束。随着 Pod 成功结束,任务跟踪记录成功结束的 Pod 数量。当达到指定的成功结束数量时,任务(即 Job)完成。删除任务的操作会清除其创建的全部 Pod。 +任务会创建一个或者多个容器组,并确保指定数量的容器组成功结束。随着容器组成功结束,任务跟踪记录成功结束的容器组数量。当达到指定的成功结束数量时,任务(即 Job)完成。删除任务的操作会清除其创建的全部容器组。 -在简单的使用场景中,您可以创建一个任务对象,以便可靠地运行一个 Pod 直到结束。当第一个 Pod 故障或者被删除(例如因为节点硬件故障或者节点重启)时,任务对象会启动一个新的 Pod。您也可以使用一个任务并行运行多个 Pod。 +在简单的使用场景中,您可以创建一个任务对象,以便可靠地运行一个容器组直到结束。当第一个容器组故障或者被删除(例如因为节点硬件故障或者节点重启)时,任务对象会启动一个新的容器组。您也可以使用一个任务并行运行多个容器组。 下面的示例演示了在 KubeSphere 中创建任务的具体步骤,该任务会计算 π 到小数点后 2000 位。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建任务 @@ -23,8 +23,6 @@ weight: 10250 以 `project-regular` 身份登录控制台。转到**应用负载**下的**任务**,点击**创建**。 -![创建任务](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/create-job.PNG) - ### 步骤 2:输入基本信息 输入基本信息。请参考下图作为示例。 @@ -33,34 +31,26 @@ weight: 10250 - **别名**:任务的别名,使资源易于识别。 - **描述信息**:任务的描述,简要介绍任务。 -![输入基本信息](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-basic-info.PNG) +### 步骤 3:策略设置(可选) -### 步骤 3:任务设置(可选) - -您可以参照下图在该步骤设置值,或点击**下一步**以使用默认值。有关每个字段的详细说明,请参考下表。 - 
-![任务设置](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-job-settings.PNG) +您可以在该步骤设置值,或点击**下一步**以使用默认值。有关每个字段的详细说明,请参考下表。 | 名称 | 定义 | 描述信息 | | ---------------------- | ---------------------------- | ------------------------------------------------------------ | | 最大重试次数 | `spec.backoffLimit` | 指定将该任务视为失败之前的重试次数。默认值为 6。 | -| 完成数 | `spec.completions` | 指定该任务应该运行至成功结束的 Pod 的期望数量。如果设置为 nil,则意味着任何 Pod 成功结束即标志着所有 Pod 成功结束,并且允许并行数为任何正数值。如果设置为 1,则意味着并行数限制为 1,并且该 Pod 成功结束标志着任务成功完成。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | -| 并行数 | `spec.parallelism` | 指定该任务在任何给定时间应该运行的最大期望 Pod 数量。当剩余工作小于最大并行数时 ((`.spec.completions - .status.successful`) < `.spec.parallelism`),实际稳定运行的 Pod 数量会小于该值。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | -| 退出超时时限(单位:秒) | `spec.activeDeadlineSeconds` | 指定该任务在系统尝试终止任务前处于运行状态的持续时间(相对于 stratTime),单位为秒;该值必须是正整数。 | +| 容器组完成数量 | `spec.completions` | 指定该任务应该运行至成功结束的容器组的期望数量。如果设置为 nil,则意味着任何容器组成功结束即标志着所有容器组成功结束,并且允许并行数为任何正数值。如果设置为 1,则意味着并行数限制为 1,并且该容器组成功结束标志着任务成功完成。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | +| 并行容器组数量 | `spec.parallelism` | 指定该任务在任何给定时间应该运行的最大期望容器组数量。当剩余工作小于最大并行数时 ((`.spec.completions - .status.successful`) < `.spec.parallelism`),实际稳定运行的容器组数量会小于该值。有关更多信息,请参见 [Jobs](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 | +| 最大运行时间(s) | `spec.activeDeadlineSeconds` | 指定该任务在系统尝试终止任务前处于运行状态的持续时间(相对于 stratTime),单位为秒;该值必须是正整数。 | -### 步骤 4:设置镜像 +### 步骤 4:设置容器组 -1. **重启策略**选择 **Never**。当任务未完成时,您只能将**重启策略**指定为 **Never** 或 **OnFailure**: +1. 
**重启策略**选择**重新创建容器组**。当任务未完成时,您只能将**重启策略**指定为**重新创建容器组**或**重启容器**: - - 如果将**重启策略**设置为 **Never**,当 Pod 发生故障时,任务将创建一个新的 Pod,并且故障的 Pod 不会消失。 + - 如果将**重启策略**设置为**重新创建容器组**,当容器组发生故障时,任务将创建一个新的容器组,并且故障的容器组不会消失。 - - 如果将**重启策略**设置为 **OnFailure**,当 Pod 发生故障时,任务会在内部重启容器,而不是创建新的 Pod。 + - 如果将**重启策略**设置为**重启容器**,当容器组发生故障时,任务会在内部重启容器,而不是创建新的容器组。 - ![设置镜像](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-container-settings.PNG) - -2. 点击**添加容器镜像**,它将引导您进入**添加容器**页面。在镜像搜索栏中输入 `perl`,然后按**回车**键。 - - ![添加镜像](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/add-container-image-job.PNG) +2. 点击**添加容器**,它将引导您进入**添加容器**页面。在镜像搜索栏中输入 `perl`,然后按**回车**键。 3. 在该页面向下滚动到**启动命令**。在命令框中输入以下命令,计算 pi 到小数点后 2000 位并输出结果。点击右下角的 **√**,然后选择**下一步**继续。 @@ -68,13 +58,11 @@ weight: 10250 perl,-Mbignum=bpi,-wle,print bpi(2000) ``` - ![启动命令](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/start-command-job.PNG) - - {{< notice note >}}有关设置镜像的更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。{{}} + {{< notice note >}}有关设置镜像的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。{{}} ### 步骤 5:检查任务清单(可选) -1. 在右上角启用**编辑模式**,显示任务的清单文件。您可以看到所有值都是根据先前步骤中指定的值而设置。 +1. 在右上角启用**编辑 YAML**,显示任务的清单文件。您可以看到所有值都是根据先前步骤中指定的值而设置。 ```yaml apiVersion: batch/v1 @@ -115,34 +103,26 @@ weight: 10250 activeDeadlineSeconds: 300 ``` -2. 您可以直接在清单文件中进行调整,然后点击**创建**,或者关闭**编辑模式**然后返回**创建任务**页面。 +2. 
您可以直接在清单文件中进行调整,然后点击**创建**,或者关闭**编辑 YAML**然后返回**创建任务**页面。 - {{< notice note >}}您可以跳过本教程的**挂载存储**和**高级设置**。有关更多信息,请参见[挂载存储卷](../../../project-user-guide/application-workloads/deployments/#步骤-4挂载存储卷)和[配置高级设置](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)。{{}} + {{< notice note >}}您可以跳过本教程的**存储卷设置**和**高级设置**。有关更多信息,请参见[挂载存储卷](../../../project-user-guide/application-workloads/deployments/#步骤-4挂载存储卷)和[配置高级设置](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)。{{}} ### 步骤 6:检查结果 1. 在最后一步**高级设置**中,点击**创建**完成操作。如果创建成功,将添加新条目到任务列表中。 - ![任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-list-new.PNG) - -2. 点击此任务,然后转到**执行记录**选项卡,您可以在其中查看每个执行记录的信息。先前在步骤 3 中**完成数**设置为 `4`,因此有四个已结束的 Pod。 - - ![执行记录](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-record.PNG) +2. 点击此任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。先前在步骤 3 中**完成数**设置为 `4`,因此有四个已结束的容器组。 {{< notice tip >}}如果任务失败,您可以重新运行该任务,失败原因显示在**消息**下。{{}} -3. 在**资源状态**中,您可以查看 Pod 状态。先前将**并行数**设置为 2,因此每次会创建两个 Pod。点击右侧的 ,查看容器日志,如下所示,该日志显示了预期的计算结果。 - - ![容器日志](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log.PNG) - - ![查看容器日志](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-check.jpg) +3. 在**资源状态**中,您可以查看容器组状态。先前将**并行容器组数量**设置为 2,因此每次会创建两个容器组。点击右侧的 ,然后点击 查看容器日志,该日志显示了预期的计算结果。 {{< notice tip >}} -- 在**资源状态**中,Pod 列表提供了 Pod 的详细信息(例如创建时间、节点、Pod IP 和监控数据)。 -- 您可以点击 Pod 查看容器信息。 +- 在**资源状态**中,容器组列表提供了容器组的详细信息(例如创建时间、节点、容器组 IP 和监控数据)。 +- 您可以点击容器组查看容器信息。 - 点击容器日志图标查看容器的输出日志。 -- 您可以点击 Pod 名称查看 Pod 详情页面。 +- 您可以点击容器组名称查看容器组详情页面。 {{}} @@ -153,17 +133,13 @@ weight: 10250 在任务详情页面上,您可以在任务创建后对其进行管理。 - **编辑信息**:编辑基本信息,但`名称`无法编辑。 -- **重新执行**:重新执行任务,Pod 将重启,并生成新的执行记录。 +- **重新执行**:重新执行任务,容器组将重启,并生成新的执行记录。 - **查看配置文件**:查看 YAML 格式的任务规格。 - **删除**:删除该任务并返回到任务列表页面。 -![任务操作](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-operation.PNG) +### 任务记录 -### 执行记录 - -1. 
点击**执行记录**选项卡查看任务的执行记录。 - - ![execution-records](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-records.png) +1. 点击**任务记录**选项卡查看任务的执行记录。 2. 点击 刷新执行记录。 @@ -171,24 +147,17 @@ weight: 10250 1. 点击**资源状态**选项卡查看任务的容器组。 - ![resource-status](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/resource-status.png) - -2. 点击 刷新 Pod 信息,点击 / 显示或隐藏每个 Pod 中的容器。 +2. 点击 刷新容器组信息,点击 / 显示或隐藏每个容器组中的容器。 ### 元数据 点击**元数据**选项卡查看任务的标签和注解。 -![metadata](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/metadata.png) - ### 环境变量 点击**环境变量**选项卡查看任务的环境变量。 -![env-variable](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/env-variable.png) - ### 事件 点击**事件**选项卡查看任务的事件。 -![events](/images/docs/zh-cn/project-user-guide/application-workloads/jobs/events.png) diff --git a/content/zh/docs/project-user-guide/application-workloads/routes.md b/content/zh/docs/project-user-guide/application-workloads/routes.md index 7770603db..0a5dd8e43 100644 --- a/content/zh/docs/project-user-guide/application-workloads/routes.md +++ b/content/zh/docs/project-user-guide/application-workloads/routes.md @@ -11,36 +11,24 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. 
## 准备工作 -- 您需要创建一个企业空间、一个项目以及两个帐户(例如,`project-admin` 和 `project-regular`)。在此项目中,`project-admin` 必须具有 `admin` 角色,`project-regular` 必须具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 -- 若要以 HTTPS 模式访问应用路由,则需要[创建密钥](../../../project-user-guide/configuration/secrets/)用于加密,密钥中需要包含 `tls.crt`(TLS 证书)和 `tls.key`(TLS 私钥)。 -- 您需要[创建至少一个服务](../../../project-user-guide/application-workloads/services/)。本文档使用演示服务作为示例,该服务会将 Pod 名称返回给外部请求。 +- 您需要创建一个企业空间、一个项目以及两个用户(例如,`project-admin` 和 `project-regular`)。在此项目中,`project-admin` 必须具有 `admin` 角色,`project-regular` 必须具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 +- 若要以 HTTPS 模式访问应用路由,则需要[创建保密证书](../../../project-user-guide/configuration/secrets/)用于加密,密钥中需要包含 `tls.crt`(TLS 证书)和 `tls.key`(TLS 私钥)。 +- 您需要[创建至少一个服务](../../../project-user-guide/application-workloads/services/)。本文档使用演示服务作为示例,该服务会将容器组名称返回给外部请求。 ## 配置应用路由访问方式 1. 以 `project-admin` 身份登录 KubeSphere 的 Web 控制台,然后访问您的项目。 -2. 在左侧导航栏中选择**项目设置**下的**高级设置**,点击右侧的**设置网关**。 +2. 在左侧导航栏中选择**项目设置**下的**网关设置**,点击右侧的**开启网关**。 + +3. 在出现的对话框中,将**访问模式**设置为 **NodePort** 或 **LoadBalancer**,然后点击**确认**。 {{< notice note >}} - 若已设置访问方式,则可以点击**编辑**,然后选择**编辑网关**以更改访问方式。 + 若将**访问模式**设置为 **LoadBalancer**,则可能需要根据插件用户指南在您的环境中启用负载均衡器插件。 {{}} - ![set-gateway](/images/docs/zh-cn/project-user-guide/application-workloads/routes/set-gateway.png) - -3. 在出现的**设置网关**对话框中,将**访问方式**设置为 **NodePort** 或 **LoadBalancer**,然后点击**保存**。 - - {{< notice note >}} - - 若将**访问方式**设置为 **LoadBalancer**,则可能需要根据插件用户指南在您的环境中启用负载均衡器插件。 - - {{}} - - ![access-method-nodeport](/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-nodeport.png) - - ![access-method-loadbalancer](/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-loadbalancer.png) - ## 创建应用路由 ### 步骤 1:配置基本信息 @@ -49,15 +37,11 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. 2. 
选择左侧导航栏**应用负载**中的**应用路由**,点击右侧的**创建**。 - ![create-route](/images/docs/zh-cn/project-user-guide/application-workloads/routes/create-route.png) - 3. 在**基本信息**选项卡中,配置应用路由的基本信息,并点击**下一步**。 * **名称**:应用路由的名称,用作此应用路由的唯一标识符。 * **别名**:应用路由的别名。 * **描述信息**:应用路由的描述信息。 - ![basic-info](/images/docs/zh-cn/project-user-guide/application-workloads/routes/basic-info.png) - ### 步骤 2:配置路由规则 1. 在**路由规则**选项卡中,点击**添加路由规则**。 @@ -68,15 +52,11 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. * **路径**:将每个服务映射到一条路径。您可以点击**添加 Path** 来添加多条路径。 - ![auto-generate](/images/docs/zh-cn/project-user-guide/application-workloads/routes/auto-generate.png) - * **指定域名**:使用用户定义的域名。此模式同时支持 HTTP 和 HTTPS。 * **域名**:为应用路由设置域名。 * **协议**:选择 `http` 或 `https`。如果选择了 `https`,则需要选择包含 `tls.crt`(TLS 证书)和 `tls.key`(TLS 私钥)的密钥用于加密。 * **路径**:将每个服务映射到一条路径。您可以点击**添加 Path** 来添加多条路径。 - - ![specify-domain](/images/docs/zh-cn/project-user-guide/application-workloads/routes/specify-domain.png) ### (可选)步骤 3:配置高级设置 @@ -90,23 +70,15 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. {{}} - ![add-metadata](/images/docs/zh-cn/project-user-guide/application-workloads/routes/add-metadata.png) - ### 步骤 4:获取域名、服务路径和网关地址 1. 在左侧导航栏中选择**应用负载**中的**应用路由**,点击右侧的应用路由名称。 - ![route-list](/images/docs/zh-cn/project-user-guide/application-workloads/routes/route-list.png) +2. 在**规则**区域获取域名和服务路径以及网关地址。 -2. 
在**规则**区域获取域名和服务路径,在**详情**区域获取网关地址。 + * 如果[应用路由访问模式](#配置应用路由访问方式)设置为 NodePort,则会使用 Kubernetes 集群节点的 IP 地址作为网关地址,NodePort 位于域名之后。 - * 如果[应用路由访问方式](#配置应用路由访问方式)设置为 NodePort,则会使用 Kubernetes 集群节点的 IP 地址作为网关地址,NodePort 位于域名之后。 - - ![obtain-address-nodeport](/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-nodeport.png) - - * 如果[应用路由访问方式](#配置应用路由访问方式)设置为 LoadBalancer,则网关地址由负载均衡器插件指定。 - - ![obtain-address-loadbalancer](/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png) + * 如果[应用路由访问模式](#配置应用路由访问方式)设置为 LoadBalancer,则网关地址由负载均衡器插件指定。 ## 配置域名解析 @@ -116,22 +88,18 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. ## 访问应用路由 -### NodePort 访问方式 +### NodePort 访问模式 1. 登录连接到应用路由网关地址的客户端机器。 2. 使用`<路由域名>:/<服务路径>`地址访问应用路由的后端服务。 - ![access-route-nodeport](/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-nodeport.png) - ### LoadBalancer 访问方式 1. 登录连接到应用路由网关地址的客户端机器。 2. 使用`<路由域名>/<服务路径>`地址访问应用路由的后端服务。 - ![access-route-loadbalancer](/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-loadbalancer.png) - {{< notice note >}} 如果您需要使用 NodePort 或 LoadBalancer 从私有网络外部访问应用路由,具体取决于您的网络环境: @@ -148,31 +116,21 @@ KubeSphere 上的应用路由和 Kubernetes 上的 [Ingress](https://kubernetes. 1. 在左侧导航栏中选择**工作负载**中的**应用路由**,点击右侧的应用路由名称。 - ![route-list](/images/docs/zh-cn/project-user-guide/application-workloads/routes/route-list.png) - 2. 
点击**编辑信息**,或点击**更多操作**,从下拉菜单中选择一项操作。 * **编辑信息**:编辑应用路由的基本信息,但无法编辑路由名称。 - * **编辑配置文件**:编辑应用路由的 YAML 配置文件。 - * **编辑规则**:编辑应用路由的规则。 + * **编辑 YAML**:编辑应用路由的 YAML 配置文件。 + * **编辑路由规则**:编辑应用路由的规则。 * **编辑注解**:编辑应用路由的注解。有关更多信息,请参见 [Nginx Ingress controller 官方文档](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/)。 * **删除**:删除应用路由并返回应用路由列表页面。 - ![edit-route](/images/docs/zh-cn/project-user-guide/application-workloads/routes/edit-route.png) - ### 资源状态 点击**资源状态**选项卡查看应用路由规则。 -![resource-status](/images/docs/zh-cn/project-user-guide/application-workloads/routes/resource-status.png) - ### 元数据 点击**元数据**选项卡查看应用路由的标签和注解。 -![metadata](/images/docs/zh-cn/project-user-guide/application-workloads/routes/metadata.png) - ### 事件 点击**事件**选项卡查看应用路由的事件。 - -![events](/images/docs/zh-cn/project-user-guide/application-workloads/routes/events.png) \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/application-workloads/services.md b/content/zh/docs/project-user-guide/application-workloads/services.md index 89d098d9b..efe7582f6 100644 --- a/content/zh/docs/project-user-guide/application-workloads/services.md +++ b/content/zh/docs/project-user-guide/application-workloads/services.md @@ -6,17 +6,17 @@ linkTitle: "服务" weight: 10240 --- -服务是一种抽象方法,它将运行在一组 Pod 上的应用程序暴露为网络服务。也就是说,服务将这些 Pod 的 Endpoint 组成一个单一资源,可以通过不同的方式访问该资源。 +服务是一种抽象方法,它将运行在一组容器组上的应用程序暴露为网络服务。也就是说,服务将这些容器组的 Endpoint 组成一个单一资源,可以通过不同的方式访问该资源。 -有了 Kubernetes,您无需修改应用程序来使用不熟悉的服务发现机制。Kubernetes 为 Pod 提供 IP 地址,为一组 Pod 提供一个单一 DNS 名称,并且可以在 Pod 之间进行负载均衡。 +有了 Kubernetes,您无需修改应用程序来使用不熟悉的服务发现机制。Kubernetes 为容器组提供 IP 地址,为一组容器组提供一个单一 DNS 名称,并且可以在容器组之间进行负载均衡。 有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/services-networking/service/)。 ## 访问类型 -- **虚拟 IP**:虚拟 IP 基于集群生成的唯一 IP。集群内部可以通过该 IP 访问服务。此访问类型适用于大多数服务。此外,集群外部也可以通过 NodePort 和 LoadBalancer 访问服务。 +- **虚拟 IP**:虚拟 IP 是基于集群生成的唯一 IP。集群内部可以通过该 IP 访问服务。此访问类型适用于大多数服务。此外,集群外部也可以通过 NodePort 和 LoadBalancer 访问服务。 -- 
**Headless**:集群不为服务生成 IP 地址,在集群内通过服务的后端 Pod IP 直接访问服务。此访问类型适用于后端异构服务,例如需要区分 master 和 slave 的服务。 +- **Headless**:集群不为服务生成 IP 地址,在集群内通过服务的后端容器组 IP 直接访问服务。此访问类型适用于后端异构服务,例如需要区分 master 和 agent 的服务。 {{< notice tip>}} @@ -26,17 +26,15 @@ weight: 10240 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 服务类型 -如下图所示,KubeSphere 提供三种创建服务的基本方法:**无状态服务**、**有状态服务**和**外部服务**。另外,您还可以通过**自定义创建**下面的**指定工作负载**和**编辑 YAML** 来自定义服务。 - -![创建服务类型](/images/docs/zh-cn/project-user-guide/application-workloads/services/create-service-type.png) +KubeSphere 提供三种创建服务的基本方法:**无状态服务**、**有状态服务**和**外部服务**。另外,您还可以通过**自定义服务**下面的**指定工作负载**和**编辑 YAML** 来自定义服务。 - **无状态服务** - 无状态服务是容器服务中最常用的服务类型。无状态服务定义 Pod 模板来控制 Pod 状态,包括滚动更新和回滚。您创建无状态服务时会同时创建**部署**工作负载。有关无状态服务的更多信息,请参见[部署](../../../project-user-guide/application-workloads/deployments/)。 + 无状态服务是容器服务中最常用的服务类型。无状态服务定义容器组模板来控制容器组状态,包括滚动更新和回滚。您创建无状态服务时会同时创建**部署**工作负载。有关无状态服务的更多信息,请参见[部署](../../../project-user-guide/application-workloads/deployments/)。 - **有状态服务** @@ -44,11 +42,11 @@ weight: 10240 - **外部服务** - 与无状态服务和有状态服务不同,ExternalName 服务将一个服务映射到一个 DNS 名称,而不是映射到选择器。您需要在 **ExternalName** 字段中指定这些服务,该字段显示在 YAML 文件中的 `externalName`。 + 与无状态服务和有状态服务不同,外部服务将一个服务映射到一个 DNS 名称,而不是映射到选择器。您需要在**外部服务地址**字段中指定这些服务,该字段显示在 YAML 文件中的 `externalName`。 - **指定工作负载** - 使用现有 Pod 创建服务。 + 使用现有容器组创建服务。 - **编辑 YAML** @@ -66,12 +64,8 @@ weight: 10240 1. 在项目页面转到**应用负载**下的**服务**,点击**创建**。 - ![服务列表](/images/docs/zh-cn/project-user-guide/application-workloads/services/services-lists.PNG) - 2. 点击**无状态服务**。 - ![无状态服务](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form.PNG) - {{< notice note >}} 创建有状态服务的步骤和创建无状态服务的步骤基本相同。本示例仅使用创建无状态服务的过程来进行演示。 @@ -82,15 +76,13 @@ weight: 10240 1. 
在弹出的对话框中,您可以看到字段**版本**已经预先填写了 `v1`。您需要输入服务的名称,例如 `demo-service`。完成后,点击**下一步**继续。 - ![输入基本信息](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-1.PNG) - - **名称**:服务和部署的名称,也是唯一标识符。 - **别名**:服务的别名,使资源更容易识别。 - **版本**:只能包含小写字母和数字,最长 16 个字符。 {{< notice tip >}} -**名称**的值用于两个配置中,一个是部署,另一个是服务。您可以启用右上角的**编辑模式**查看部署的清单文件以及服务的清单文件。下方是一个示例文件,供您参考。 +**名称**的值用于两个配置中,一个是部署,另一个是服务。您可以启用右上角的**编辑 YAML**查看部署的清单文件以及服务的清单文件。下方是一个示例文件,供您参考。 {{}} @@ -125,15 +117,13 @@ weight: 10240 app: xxx ``` -### 步骤 3:设置镜像 +### 步骤 3:设置容器组 -为服务添加容器镜像,详情请参见[设置镜像](../../../project-user-guide/application-workloads/deployments/#步骤-3设置镜像)。 - -![设置镜像](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-2.PNG) +为服务添加容器镜像,详情请参见[设置容器组](../../../project-user-guide/application-workloads/deployments/#步骤-3设置容器组)。 {{< notice tip >}} -有关仪表板上各项属性的详细说明,请直接参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。 +有关仪表板上各项属性的详细说明,请直接参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 {{}} @@ -141,15 +131,11 @@ weight: 10240 要为服务挂载存储卷,详情请参见[挂载存储卷](../../../project-user-guide/application-workloads/deployments/#步骤-4挂载存储卷)。 -![挂载存储卷](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-3.PNG) - ### 步骤 5:配置高级设置 -您可以设置节点调度策略并添加元数据,具体操作与[部署](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)中的说明相同。对于服务,您可以看到两个额外选项可用,即**外网访问**和**开启会话保持**。 +您可以设置节点调度策略并添加元数据,具体操作与[部署](../../../project-user-guide/application-workloads/deployments/#步骤-5配置高级设置)中的说明相同。对于服务,您可以看到两个额外选项可用,即**外部访问**和**会话保持**。 -![高级设置](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-4.PNG) - -- 外网访问 +- 外部访问 您可以通过两种方法向外暴露服务,即 NodePort 和 LoadBalancer。 @@ -163,7 +149,7 @@ weight: 10240 {{}} -- 开启会话保持 +- 会话保持 您可能想把从单个客户端会话发送的所有流量都路由到跨多个副本运行的应用的同一实例。这种做法降低了延迟,因此能更好地利用缓存。负载均衡的这种行为称为“会话保持 (Sticky Session)”。 @@ -173,44 +159,33 @@ weight: 10240 ### 详情页面 -1. 
创建服务后,您可以点击右侧的 进一步编辑它,例如元数据(**名称**无法编辑)、配置文件、端口以及外网访问。 - - ![创建完成](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-finish.PNG) +1. 创建服务后,您可以点击右侧的 进一步编辑它,例如元数据(**名称**无法编辑)、配置文件、端口以及外部访问。 - **编辑**:查看和编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 - **编辑服务**:查看访问类型并设置选择器和端口。 - - **编辑外网访问**:编辑服务的外网访问方法。 - - **删除**:当您删除服务时,会在弹出对话框中显示关联资源。如果您勾选这些关联资源,则会与服务一同删除。 + - **编辑外部访问**:编辑服务的外部访问方法。 + - **删除**:当您删除服务时,会在弹出的对话框中显示关联资源。如果您勾选这些关联资源,则会与服务一同删除。 2. 点击服务名称可以转到它的详情页面。 - ![详情页面](/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-detail.PNG) - - 点击**更多操作**展开下拉菜单,菜单内容与服务列表中的下拉菜单相同。 - - Pod 列表提供 Pod 的详细信息(运行状态、节点、Pod IP 以及资源使用情况)。 - - 您可以点击 Pod 条目查看容器信息。 + - 容器组列表提供容器组的详细信息(运行状态、节点、容器组IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 - 点击容器日志图标查看容器的输出日志。 - - 您可以点击 Pod 名称查看 Pod 详情页面。 + - 您可以点击容器组名称来查看容器组详情页面。 ### 资源状态 -1. 点击**资源状态**选项卡以查看服务端口、工作负载和 Pod 信息。 +1. 点击**资源状态**选项卡以查看服务端口、工作负载和容器组信息。 - ![services-resource-status](/images/docs/zh-cn/project-user-guide/application-workloads/services/services-resource-status.png) - -2. 在**容器组**区域,点击 以刷新 Pod 信息,点击 / 以显示或隐藏每个 Pod 中的容器。 - - ![services](/images/docs/zh-cn/project-user-guide/application-workloads/services/services-pods.png) +2. 
在**容器组**区域,点击 以刷新容器组信息,点击 / 以显示或隐藏每个容器组中的容器。 ### 元数据 点击**元数据**选项卡以查看服务的标签和注解。 -![services](/images/docs/zh-cn/project-user-guide/application-workloads/services/services-matadata.png) - ### 事件 点击**事件**选项卡以查看服务的事件。 -![services](/images/docs/zh-cn/project-user-guide/application-workloads/services/services-events.png) diff --git a/content/zh/docs/project-user-guide/application-workloads/statefulsets.md b/content/zh/docs/project-user-guide/application-workloads/statefulsets.md index 8fb39de9f..8a746359a 100644 --- a/content/zh/docs/project-user-guide/application-workloads/statefulsets.md +++ b/content/zh/docs/project-user-guide/application-workloads/statefulsets.md @@ -7,11 +7,11 @@ linkTitle: "有状态副本集" weight: 10220 --- -有状态副本集是用于管理有状态应用的工作负载 API 对象,负责一组 Pod 的部署和扩缩,并保证这些 Pod 的顺序性和唯一性。 +有状态副本集是用于管理有状态应用的工作负载 API 对象,负责一组容器组的部署和扩缩,并保证这些容器组的顺序性和唯一性。 -与部署类似,有状态副本集管理基于相同容器规范的 Pod。与部署不同的是,有状态副本集为其每个 Pod 维护一个粘性身份。这些 Pod 根据相同的规范而创建,但不能相互替换:每个 Pod 都有一个持久的标识符,无论 Pod 如何调度,该标识符均保持不变。 +与部署类似,有状态副本集管理基于相同容器规范的容器组。与部署不同的是,有状态副本集为其每个容器组维护一个粘性身份。这些容器组根据相同的规范而创建,但不能相互替换:每个容器组都有一个持久的标识符,无论容器组如何调度,该标识符均保持不变。 -如果您想使用存储卷为工作负载提供持久化存储,可以使用有状态副本集作为解决方案的一部分。尽管有状态副本集中的单个 Pod 容易出现故障,但持久的 Pod 标识符可以更容易地将现有存储卷匹配到替换任意故障 Pod 的新 Pod。 +如果您想使用存储卷为工作负载提供持久化存储,可以使用有状态副本集作为解决方案的一部分。尽管有状态副本集中的单个容器组容易出现故障,但持久的容器组标识符可以更容易地将现有存储卷匹配到替换任意故障容器组的新容器组。 对于需要满足以下一个或多个需求的应用程序来说,有状态副本集非常有用。 @@ -24,7 +24,7 @@ weight: 10220 ## 准备工作 -您需要创建一个企业空间、一个项目以及一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目以及一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 创建有状态副本集 @@ -34,72 +34,56 @@ weight: 10220 以 `project-regular` 身份登录控制台。转到项目的**应用负载**,选择**工作负载**,然后在**有状态副本集**选项卡下点击**创建**。 -![有状态副本集](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets.png) - ### 步骤 2:输入基本信息 为有状态副本集指定一个名称(例如 `demo-stateful`),然后点击**下一步**继续。 
-![输入名称](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_1.png) +### 步骤 3:设置容器组 -### 步骤 3:设置镜像 - -1. 设置镜像前,请点击**容器组副本数量**中的 来定义 Pod(即容器组)的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 +1. 设置镜像前,请点击**容器组副本数量**中的 来定义容器组的副本数量,该参数显示在清单文件中的 `.spec.replicas` 字段。 {{< notice tip >}} -您可以启用右上角的**编辑模式**,查看 YAML 格式的有状态副本集清单文件。KubeSphere 使您可以直接编辑清单文件创建有状态副本集,或者您可以按照下列步骤使用仪表板创建有状态副本集。 +您可以启用右上角的**编辑 YAML**,查看 YAML 格式的有状态副本集清单文件。KubeSphere 使您可以直接编辑清单文件创建有状态副本集,或者您可以按照下列步骤使用仪表板创建有状态副本集。 {{}} - - ![设置镜像](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2.png) -2. 点击**添加容器镜像**。 - - ![添加容器镜像](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_btn.png) +2. 点击**添加容器**。 3. 输入镜像名称,该镜像可以来自公共 Docker Hub,也可以来自您指定的[私有仓库](../../../project-user-guide/configuration/image-registry/)。例如,在搜索栏输入 `nginx` 然后按**回车键**。 - ![输入镜像名称](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_1.png) - {{< notice note >}} - 在搜索栏输入镜像名称后,请记得按键盘上的**回车键**。 -- 如果想使用您的私有镜像仓库,您应该先通过**配置中心**下面的**密钥**[创建镜像仓库密钥](../../../project-user-guide/configuration/image-registry/)。 +- 如果想使用您的私有镜像仓库,您应该先通过**配置**下面的**保密字典**[创建镜像仓库保密字典](../../../project-user-guide/configuration/image-registry/)。 {{}} 4. 根据您的需求设置 CPU 和内存的资源请求和限制。有关更多信息,请参见[容器镜像设置中关于资源请求和资源限制的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 - ![资源请求](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulset-request-limit.png) +5. 点击**使用默认端口**以自动填充**端口设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 -5. 点击**使用默认端口**以自动填充**服务设置**,或者您可以自定义**协议**、**名称**和**容器端口**。 +6. 从下拉列表中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 -6. 从下拉菜单中选择镜像拉取策略。有关更多信息,请参见[容器镜像设置中关于镜像拉取策略的内容](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)。 - -7. 
对于其他设置(**健康检查器**、**启动命令**、**环境变量**、**容器 Security Context** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 +7. 对于其他设置(**健康检查**、**启动命令**、**环境变量**、**容器安全上下文** 以及**同步主机时区**),您也可以在仪表板上配置它们。有关更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/#添加容器镜像)中对这些属性的详细说明。操作完成后,点击右下角的 **√** 继续。 8. 在下拉菜单中选择更新策略。建议您选择**滚动更新**。有关更多信息,请参见[更新策略](../container-image-settings/#更新策略)。 -9. 选择部署模式。有关更多信息,请参见[部署模式](../../../project-user-guide/application-workloads/container-image-settings/#部署模式)。 +9. 选择容器组调度规则。有关更多信息,请参见[容器组调度规则](../../../project-user-guide/application-workloads/container-image-settings/#容器组调度规则)。 -10. 完成容器镜像设置后,点击**下一步**继续。 +10. 完成容器组设置后,点击**下一步**继续。 ### 步骤 4:挂载存储卷 有状态副本集可以使用存储卷模板,但是您必须提前在**存储管理**中创建它。有关存储卷的更多信息,请访问[存储卷](../../../project-user-guide/storage/volumes/#挂载存储卷)。完成后,点击**下一步**继续。 -![挂载存储卷](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_3.png) - ### 步骤 5:配置高级设置 您可以在此部分中设置节点调度策略并添加元数据。完成操作后,点击**创建**完成创建有状态副本集的整个流程。 -![高级设置](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_4.png) +- **选择节点** -- **设置节点调度策略** - - 您可以让 Pod 副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 + 分配容器组副本在指定节点上运行。该参数在 `nodeSelector` 字段中指定。 - **添加元数据** @@ -109,63 +93,47 @@ weight: 10220 ### 详情页面 -1. 有状态副本集创建后会显示在下方的列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的有状态副本集。 +1. 有状态副本集创建后会显示列表中。您可以点击右边的 ,在弹出菜单中选择操作,修改您的有状态副本集。 - ![列表](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_list.png) - - - **编辑**:查看并编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该有状态副本集。 + - **编辑信息**:查看并编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该有状态副本集。 - **删除**:删除该有状态副本集。 2. 点击有状态副本集名称可以进入它的详情页面。 - ![详情页面](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail.png) - 3. 
点击**更多操作**,显示您可以对该有状态副本集进行的操作。 - ![更多操作](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_operation_btn.png) - - - **版本回退**:选择要回退的版本。 + - **回退**:选择要回退的版本。 - **编辑服务**:设置端口来暴露容器镜像和服务端口。 - - **编辑配置模板**:配置更新策略、容器和存储卷。 - - **编辑配置文件**:查看、上传、下载或者更新 YAML 文件。 - - **重新部署**:重新部署该有状态副本集。 + - **编辑设置**:配置更新策略、容器和存储卷。 + - **编辑 YAML**:查看、上传、下载或者更新 YAML 文件。 + - **重新创建**:重新创建该有状态副本集。 - **删除**:删除该有状态副本集并返回有状态副本集列表页面。 -4. 点击**资源状态**选项卡,查看该有状态副本集的端口和 Pod 信息。 +4. 点击**资源状态**选项卡,查看该有状态副本集的端口和容器组信息。 - ![资源状态](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_state.png) + - **副本运行状态**:点击 来增加或减少容器组副本数量。 + - **容器组** - - **副本运行状态**:点击 来增加或减少 Pod 副本数量。 - - **Pod 详情** - - ![Pod 列表](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_pod.png) - - - Pod 列表中显示了 Pod 详情(运行状态、节点、Pod IP 以及资源使用情况)。 - - 您可以点击 Pod 条目查看容器信息。 + - 容器组列表中显示了容器组详情(运行状态、节点、容器组IP 以及资源使用情况)。 + - 您可以点击容器组条目查看容器信息。 - 点击容器日志图标查看容器的输出日志。 - - 您可以点击 Pod 名称查看 Pod 详情页面。 + - 您可以点击容器组名称查看容器组详情页面。 ### 版本记录 -修改工作负载的资源模板后,会生成一个新的日志并重新调度 Pod 进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新部署。 +修改工作负载的资源模板后,会生成一个新的日志并重新调度容器组进行版本更新。默认保存 10 个最近的版本。您可以根据修改日志进行重新创建。 ### 元数据 点击**元数据**选项卡以查看有状态副本集的标签和注解。 -![statefulsets](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-matadata.png) - ### 监控 1. 点击**监控**选项卡以查看有状态副本集的 CPU 使用量、内存使用量、网络流出速率和网络流入速率。 - ![statefulsets](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-monitoring.png) - -2. 点击右上角的下拉菜单以自定义时间范围和时间间隔。 - - ![statefulsets](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-time-range.png) +2. 点击右上角的下拉菜单以自定义时间范围和采样间隔。 3. 
点击右上角的 statefulsets_autorefresh_start/ 以开始或停止自动刷新数据。 @@ -175,10 +143,6 @@ weight: 10220 点击**环境变量**选项卡查看有状态副本集的环境变量。 -![statefulsets](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-evn-variables.png) - ### 事件 点击**事件**查看有状态副本集的事件。 - -![statefulsets](/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-events.png) diff --git a/content/zh/docs/project-user-guide/application/app-template.md b/content/zh/docs/project-user-guide/application/app-template.md index 4475ac61c..bd62a79bd 100644 --- a/content/zh/docs/project-user-guide/application/app-template.md +++ b/content/zh/docs/project-user-guide/application/app-template.md @@ -1,6 +1,6 @@ --- title: "应用模板" -keywords: 'Kubernetes, chart, Helm, KubeSphere, 应用程序, 仓库, 模板' +keywords: 'Kubernetes, Chart, Helm, KubeSphere, 应用程序, 仓库, 模板' description: '了解应用模板的概念以及它们如何在企业内部帮助部署应用程序。' linkTitle: "应用模板" weight: 10110 @@ -14,15 +14,11 @@ weight: 10110 KubeSphere 的公共仓库也称作应用商店,企业空间中的每位租户都能访问。[上传应用的 Helm Chart](../../../workspace-administration/upload-helm-based-application/) 后,您可以部署应用来测试它的功能,并提交审核。最终待应用审核通过后,您可以选择将它发布至应用商店。有关更多信息,请参见[应用程序生命周期管理](../../../application-store/app-lifecycle-management/)。 -![应用商店](/images/docs/zh-cn/project-user-guide/applications/app-templates/app-store-1.png) - 对于私有仓库,只有拥有必要权限的用户才能在企业空间中[添加私有仓库](../../../workspace-administration/app-repository/import-helm-repository/)。一般来说,私有仓库基于对象存储服务构建,例如 MinIO。这些私有仓库在导入 KubeSphere 后会充当应用程序池,提供应用模板。 -![私有应用仓库](/images/docs/zh-cn/project-user-guide/applications/app-templates/private-app-repository-2.png) - {{< notice note >}} -对于 KubeSphere 中[作为 Helm Chart 上传的单个应用](../../../workspace-administration/upload-helm-based-application/),待审核通过并发布后,会和内置应用一同显示在应用商店中。此外,当您从私有应用仓库中选择应用模板时,在下拉列表中也可以看到**来自企业空间**,其中存储了这些作为 Helm Chart 上传的单个应用。 +对于 KubeSphere 中[作为 Helm Chart 
上传的单个应用](../../../workspace-administration/upload-helm-based-application/),待审核通过并发布后,会和内置应用一同显示在应用商店中。此外,当您从私有应用仓库中选择应用模板时,在下拉列表中也可以看到**当前企业空间**,其中存储了这些作为 Helm Chart 上传的单个应用。 {{}} diff --git a/content/zh/docs/project-user-guide/application/compose-app.md b/content/zh/docs/project-user-guide/application/compose-app.md index 721763eca..058504cc8 100644 --- a/content/zh/docs/project-user-guide/application/compose-app.md +++ b/content/zh/docs/project-user-guide/application/compose-app.md @@ -12,26 +12,26 @@ weight: 10140 ## 准备工作 -- 您需要为本教程创建一个企业空间、一个项目以及一个帐户 (`project-regular`)。该帐户需要被邀请至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要为本教程创建一个企业空间、一个项目以及一个用户 (`project-regular`)。该用户需要被邀请至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - `project-admin` 需要[设置项目网关](../../../project-administration/project-gateway/),以便 `project-regular` 能在创建应用时定义域名。 ## 构建自制应用的微服务 -1. 登录 KubeSphere 的 Web 控制台,导航到项目**应用负载**中的**应用**。在**自制应用**选项卡中,点击**构建自制应用**。 +1. 登录 KubeSphere 的 Web 控制台,导航到项目**应用负载**中的**应用**。在**自制应用**选项卡中,点击**创建**。 2. 设置应用名称(例如 `bookinfo`)并点击**下一步**。 -3. 在**服务组件**页面,您需要构建自制应用的微服务。点击**添加服务**,选择**无状态服务**。 +3. 在**服务**页面,您需要构建自制应用的微服务。点击**创建服务**,选择**无状态服务**。 4. 设置服务名称(例如 `productpage`)并点击**下一步**。 {{< notice note >}} - 您可以直接在面板上创建服务,或者启用右上角的**编辑模式**以编辑 YAML 文件。 + 您可以直接在面板上创建服务,或者启用右上角的**编辑 YAML**以编辑 YAML 文件。 {{}} -5. 点击**容器镜像**下的**添加容器镜像**,在搜索栏中输入 `kubesphere/examples-bookinfo-productpage-v1:1.13.0` 以使用 Docker Hub 镜像。 +5. 点击**容器**下的**添加容器**,在搜索栏中输入 `kubesphere/examples-bookinfo-productpage-v1:1.13.0` 以使用 Docker Hub 镜像。 {{< notice note >}} @@ -39,11 +39,11 @@ weight: 10140 {{}} -6. 点击**使用默认端口**。有关更多镜像设置的信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。点击右下角的 **√** 和**下一步**以继续操作。 +6. 点击**使用默认端口**。有关更多镜像设置的信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。点击右下角的 **√** 和**下一步**以继续操作。 -7. 
在**挂载存储**页面,[添加存储卷](../../../project-user-guide/storage/volumes/)或点击**下一步**以继续操作。 +7. 在**存储卷设置**页面,[添加存储卷](../../../project-user-guide/storage/volumes/)或点击**下一步**以继续操作。 -8. 在**高级设置**页面,直接点击**添加**。 +8. 在**高级设置**页面,直接点击**创建**。 9. 同样,为该应用添加其他三个微服务。以下是相应的镜像信息: @@ -55,9 +55,7 @@ weight: 10140 10. 添加微服务完成后,点击**下一步**。 -11. 在**外网访问**页面,点击**添加路由规则**。在**指定域名**选项卡中,为您的应用设置域名(例如 `demo.bookinfo`)并在**协议**字段选择 `http`。在`路径`一栏,选择服务 `productpage` 以及端口 `9080`。点击**确定**以继续操作。 - - ![route](/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/route.png) +11. 在**路由设置**页面,点击**添加路由规则**。在**指定域名**选项卡中,为您的应用设置域名(例如 `demo.bookinfo`)并在**协议**字段选择 `HTTP`。在`路径`一栏,选择服务 `productpage` 以及端口 `9080`。点击**确定**以继续操作。 {{< notice note >}} @@ -86,11 +84,7 @@ weight: 10140 2. 在**自制应用**中,点击刚才创建的应用。 -3. 在**应用组件**中,点击**点击访问**以访问该应用。 - - ![click-to-visit](/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png) - - ![dashboard](/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/dashboard.png) +3. 在**资源状态**中,点击**路由**下的**访问服务**以访问该应用。 {{< notice note >}} @@ -100,5 +94,3 @@ weight: 10140 4. 
分别点击 **Normal user** 和 **Test user** 以查看其他**服务**。 - ![review-page](/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/review-page.png) - diff --git a/content/zh/docs/project-user-guide/application/deploy-app-from-appstore.md b/content/zh/docs/project-user-guide/application/deploy-app-from-appstore.md index 7127d8381..c40b5dab9 100644 --- a/content/zh/docs/project-user-guide/application/deploy-app-from-appstore.md +++ b/content/zh/docs/project-user-guide/application/deploy-app-from-appstore.md @@ -13,7 +13,7 @@ weight: 10130 ## 准备工作 - 您需要启用 [OpenPitrix (App Store)](../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个用户帐户(例如 `project-regular`)。该帐户必须是已邀请至该项目的平台普通用户,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须被邀请至该项目,并具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 动手实验 @@ -23,60 +23,38 @@ weight: 10130 {{< notice note >}} - 您也可以在您的项目中前往**应用负载**下的**应用**页面,点击**部署新应用**,并选择**来自应用商店**进入应用商店。 + 您也可以在您的项目中前往**应用负载**下的**应用**页面,点击**创建**,并选择**来自应用商店**进入应用商店。 {{}} -2. 找到 NGINX,在**应用信息**页面点击**部署**。 - - ![nginx-in-app-store](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png) - - ![deploy-nginx](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png) +2. 找到并点击 NGINX,在**应用信息**页面点击**安装**。请确保在**应用部署须知**对话框中点击**确认**。 3. 设置应用的名称和版本,确保 NGINX 部署在 `demo-project` 项目中,点击**下一步**。 - ![confirm-deployment](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png) - -4. 
在**应用配置**页面,设置应用部署的副本数,根据需要启用或禁用 Ingress,然后点击**部署**。 - - ![edit-config-nginx](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png) - - ![manifest-file](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png) +4. 在**应用设置**页面,设置应用部署的副本数,根据需要启用或禁用 Ingress,然后点击**安装**。 {{< notice note >}} - 如需为 NGINX 设置更多的参数, 可点击 **YAML** 后的拨动开关打开应用的 YAML 配置文件,并在配置文件中设置相关参数。 + 如需为 NGINX 设置更多的参数, 可点击 **YAML** 后的切换开关打开应用的 YAML 配置文件,并在配置文件中设置相关参数。 {{}} 5. 等待应用创建完成并开始运行。 - ![nginx-running](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png) - ### 步骤 2:访问 NGINX 要从集群外访问 NGINX,您需要先用 NodePort 暴露该应用。 -1. 打开**服务**页面并点击 NGINX 的服务名称。 +1. 在 `demo-project` 项目中打开**服务**页面并点击 NGINX 的服务名称。 - ![nginx-service](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png) - -2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 - - ![edit-internet-access](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png) +2. 在服务详情页面,点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![nodeport](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png) - -4. 您可以在**服务端口**区域查看暴露的端口。 - - ![exposed-port](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png) +4. 在**端口**区域查看暴露的端口。 5. 
用 `:` 地址访问 NGINX。 - ![access-nginx](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png) - {{< notice note >}} 取决于您的 Kubernetes 集群的部署位置,您可能需要在安全组中放行端口并配置相关的端口转发规则。 diff --git a/content/zh/docs/project-user-guide/application/deploy-app-from-template.md b/content/zh/docs/project-user-guide/application/deploy-app-from-template.md index 3130c623f..ee9b6ce41 100644 --- a/content/zh/docs/project-user-guide/application/deploy-app-from-template.md +++ b/content/zh/docs/project-user-guide/application/deploy-app-from-template.md @@ -14,7 +14,7 @@ weight: 10120 ## 准备工作 - 您需要启用 [OpenPitrix (App Store)](../../../pluggable-components/app-store/)。 -- 您需要先完成[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)教程。您必须创建一个企业空间、一个项目和两个用户帐户(`ws-admin ` 和 `project-regular`)。`ws-admin` 必须被授予企业空间中的 `workspace-admin` 角色, `project-regular` 必须被授予项目中的 `operator` 角色。 +- 您需要先完成[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)教程。您必须创建一个企业空间、一个项目和两个用户(`ws-admin ` 和 `project-regular`)。`ws-admin` 必须被授予企业空间中的 `workspace-admin` 角色, `project-regular` 必须被授予项目中的 `operator` 角色。 ## 动手实验 @@ -22,15 +22,9 @@ weight: 10120 1. 以 `ws-admin` 用户登录 KubeSphere Web 控制台。在您的企业空间中,进入**应用管理**下的**应用仓库**页面,并点击**添加仓库**。 - ![add-app-repo](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png) +2. 在弹出的对话框中,将应用仓库名称设置为 `test-repo`,将应用仓库的 URL 设置为 `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/`。点击**验证**对 URL 进行验证,根据您的需要设置**同步周期**,再点击**确定**。 -2. 在弹出的对话框中,将应用仓库名称设置为 `test-repo`,将应用仓库的 URL 设置为 `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/`,点击**验证**对 URL 进行验证,再点击**确定**。 - - ![input-repo-info](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png) - -3. 应用仓库导入成功后会显示在如下图所示的列表中。 - - ![repository-list](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png) +3. 
应用仓库导入成功后会显示在列表中。 {{< notice note >}} @@ -40,49 +34,35 @@ weight: 10120 ### 步骤 2:从应用模板部署应用 -1. 登出 KubeSphere 并以 `project-regular` 用户重新登录。在您的项目中,进入**应用负载**下的**应用**页面,再点击**部署新应用**。 - - ![create-new-app](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png) +1. 登出 KubeSphere 并以 `project-regular` 用户重新登录。在您的项目中,进入**应用负载**下的**应用**页面,再点击**创建**。 2. 在弹出的对话框中选择**来自应用模板**。 - ![select-app-templates](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png) - **来自应用商店**:选择内置的应用和以 Helm Chart 形式单独上传的应用。 **来自应用模板**:从私有应用仓库和企业空间应用池选择应用。 3. 从下拉列表中选择之前添加的私有应用仓库 `test-repo`。 - ![private-app-template](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png) - {{< notice note >}} - 下拉列表中的**来自企业空间**选项表示企业空间应用池,包含以 Helm Chart 形式上传的应用。这些应用也属于应用模板。 + 下拉列表中的**当前企业空间**选项表示企业空间应用池,包含以 Helm Chart 形式上传的应用。这些应用也属于应用模板。 {{}} 4. 在搜索框中输入 `grafana` 找到该应用,点击搜索结果进行部署。 - ![search-grafana](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png) - {{< notice note >}} 本教程使用的应用仓库与 Google Helm 仓库同步。由于其中的 Helm Chart 由不同的组织维护,部分应用可能无法部署成功。 {{}} -5. 您可以查看应用信息和配置文件,在**版本**下拉列表中选择版本,然后点击**部署**。 - - ![deploy-grafana](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png) +5. 可以查看其应用信息和配置文件,在**版本**下拉列表中选择版本,然后点击**安装**。 6. 设置应用名称,确认应用版本和部署位置,点击**下一步**。 - ![confirm-info](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png) - -7. 在**应用配置**页面,您可以手动编辑清单文件或直接点击**部署**。 - - ![app-config](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png) +7. 在**应用设置**页面,手动编辑清单文件或直接点击**安装**。 8. 等待 Grafana 创建完成并开始运行。 @@ -92,37 +72,19 @@ weight: 10120 1. 打开**服务**页面,点击 Grafana 的服务名称。 - ![grafana-services](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png) - -2. 
点击**更多操作**,在下拉菜单中选择**编辑外网访问**。 - - ![edit-access](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png) +2. 点击**更多操作**,在下拉菜单中选择**编辑外部访问**。 3. 将**访问方式**设置为 **NodePort** 并点击**确定**。有关更多信息,请参见[项目网关](../../../project-administration/project-gateway/)。 - ![nodeport](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png) - -4. 您可以在**服务端口**区域查看暴露的端口。 - - ![exposed-port](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png) +4. 在**端口**区域查看暴露的端口。 ### 步骤 4:访问 Grafana -1. 您需要获取用户名和密码才能登录 Grafana 主页。前往**密钥**页面,点击与应用名称相同的条目。 - - ![grafana-secret](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png) +1. 您需要获取用户名和密码才能登录 Grafana 主页。前往**配置**下的**保密字典**页面,点击与应用名称相同的条目。 2. 在详情页面,点击眼睛图标查看用户名和密码。 - ![secret-page](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png) - - ![click-eye-icon](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png) - -2. 用 `:` 地址访问 Grafana。 - - ![grafana-UI](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png) - - ![home-page](/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png) +3. 
用 `:` 地址访问 Grafana。 {{< notice note >}} diff --git a/content/zh/docs/project-user-guide/configuration/configmaps.md b/content/zh/docs/project-user-guide/configuration/configmaps.md index 92a57524c..630732e1c 100644 --- a/content/zh/docs/project-user-guide/configuration/configmaps.md +++ b/content/zh/docs/project-user-guide/configuration/configmaps.md @@ -1,77 +1,71 @@ --- -title: "ConfigMap" +title: "配置字典" keywords: 'KubeSphere, Kubernetes, ConfigMap' -description: '了解如何在 KubeSphere 中创建 ConfigMap。' -linkTitle: "ConfigMap" +description: '了解如何在 KubeSphere 中创建配置字典。' +linkTitle: "配置字典" weight: 10420 --- -Kubernetes [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) 以键值对的形式存储配置数据。ConfigMap 资源可用于向 Pod 中注入配置数据。ConfigMap 对象中存储的数据可以被 `ConfigMap` 类型的卷引用,并由 Pod 中运行的容器化应用使用。ConfigMap 通常用于以下场景: +Kubernetes [配置字典(ConfigMap)](https://kubernetes.io/docs/concepts/configuration/configmap/) 以键值对的形式存储配置数据。配置字典资源可用于向容器组中注入配置数据。配置字典对象中存储的数据可以被 `ConfigMap` 类型的卷引用,并由容器组中运行的容器化应用使用。配置字典通常用于以下场景: - 设置环境变量的值。 - 设置容器中的命令参数。 - 在卷中创建配置文件。 -本教程演示如何在 KubeSphere 中创建 ConfigMap。 +本教程演示如何在 KubeSphere 中创建配置字典。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 -## 创建 ConfigMap +## 创建配置字典 -1. 以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置中心**下的**配置**,然后点击**创建**。 +1. 以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置**下的**配置字典**,然后点击**创建**。 -2. 在出现的对话框中,设置 ConfigMap 的名称(例如 `demo-configmap`),然后点击**下一步**。 +2. 在弹出的对话框中,设置配置字典的名称(例如 `demo-configmap`),然后点击**下一步**。 {{< notice tip >}} -您可以在对话框右上角启用**编辑模式**来查看 ConfigMap 的 YAML 清单文件,并通过直接编辑清单文件来创建 ConfigMap。您也可以继续执行后续步骤在控制台上创建 ConfigMap。 +您可以在对话框右上角启用**编辑 YAML** 来查看配置字典的 YAML 清单文件,并通过直接编辑清单文件来创建配置字典。您也可以继续执行后续步骤在控制台上创建配置字典。 {{}} -3. 在**配置设置**选项卡,点击**添加数据**以配置键值对。 +3. 
在**数据设置**选项卡,点击**添加数据**以配置键值对。 4. 输入一个键值对。下图为示例: - ![key-value](/images/docs/zh-cn/project-user-guide/configurations/configmaps/key-value.jpg) - {{< notice note >}} - 配置的键值对会显示在清单文件中的 `data` 字段下。 -- 目前 KubeSphere 控制台只支持在 ConfigMap 中配置键值对。未来版本将会支持添加配置文件的路径来创建 ConfigMap。 +- 目前 KubeSphere 控制台只支持在配置字典中配置键值对。未来版本将会支持添加配置文件的路径来创建配置字典。 {{}} 5. 点击对话框右下角的 **√** 以保存配置。您可以再次点击**添加数据**继续配置更多键值对。 -6. 点击**创建**以生成 ConfigMap。 +6. 点击**创建**以生成配置字典。 -## 查看 ConfigMap 详情 +## 查看配置字典详情 -1. ConfigMap 创建后会显示在**配置**页面。您可以点击右侧的 ,并从下拉菜单中选择操作来修改 ConfigMap。 +1. 配置字典创建后会显示在**配置字典**页面。您可以点击右侧的 ,并从下拉菜单中选择操作来修改配置字典。 - **编辑**:查看和编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或更新 YAML 文件。 - - **修改配置**:修改 ConfigMap 键值对。 - - **删除**:删除 ConfigMap。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改配置字典键值对。 + - **删除**:删除配置字典。 -2. 点击 ConfigMap 名称打开 ConfigMap 详情页面。在**详情**选项卡,您可以查看 ConfigMap 的所有键值对。 +2. 点击配置字典名称打开其详情页面。在**数据**选项卡,您可以查看配置字典的所有键值对。 - ![detail-page](/images/docs/zh-cn/project-user-guide/configurations/configmaps/detail-page.png) +3. 点击**更多操作**对配置字典进行其他操作。 -3. 点击**更多操作**对 ConfigMap 进行其他操作。 - - - **编辑配置文件**:查看、上传、下载或更新 YAML 文件。 - - **修改配置**:修改 ConfigMap 键值对。 - - **删除**:删除 ConfigMap 并返回 ConfigMap 列表页面。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改配置字典键值对。 + - **删除**:删除配置字典并返回配置字典列表页面。 -4. 点击**编辑信息**来查看和编辑 ConfigMap 的基本信息。 +4. 
点击**编辑信息**来查看和编辑配置字典的基本信息。 -## 使用 ConfigMap +## 使用配置字典 -在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您可以用 ConfigMap 为容器添加环境变量。您可以在**容器镜像**页面勾选**环境变量**,点击**引用配置文件或密钥**,然后从下拉列表中选择一个 ConfigMap。 - -![use-configmap](/images/docs/zh-cn/project-user-guide/configurations/configmaps/use-configmap.jpg) +在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您可以用配置字典为容器添加环境变量。您可以在**添加容器**页面勾选**环境变量**,点击**引用配置字典或保密字典**,然后从下拉列表中选择一个配置字典。 diff --git a/content/zh/docs/project-user-guide/configuration/image-registry.md b/content/zh/docs/project-user-guide/configuration/image-registry.md index 9f2adfbbf..8178d0108 100644 --- a/content/zh/docs/project-user-guide/configuration/image-registry.md +++ b/content/zh/docs/project-user-guide/configuration/image-registry.md @@ -1,6 +1,6 @@ --- title: "镜像仓库" -keywords: 'KubeSphere, Kubernetes, Docker, 密钥' +keywords: 'KubeSphere, Kubernetes, Docker, 保密字典' description: '了解如何在 KubeSphere 中创建镜像仓库。' linkTitle: "镜像仓库" weight: 10430 @@ -8,52 +8,44 @@ weight: 10430 Docker 镜像是一个只读的模板,可用于部署容器服务。每个镜像都有一个唯一标识符(即`镜像名称:标签`)。例如,一个镜像可以包含只安装有 Apache 和几个应用的完整的 Ubuntu 操作系统软件包。镜像仓库可用于存储和分发 Docker 镜像。 -本教程演示如何为不同的镜像仓库创建密钥。 +本教程演示如何为不同的镜像仓库创建保密字典。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 -## 创建密钥 +## 创建保密字典 
-创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,除了从公共仓库选择镜像,您还可以从私有仓库选择镜像。要使用私有仓库中的镜像,您必须先为私有仓库创建密钥,以便在 KubeSphere 中集成该私有仓库。 +创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,除了从公共仓库选择镜像,您还可以从私有仓库选择镜像。要使用私有仓库中的镜像,您必须先为私有仓库创建保密字典,以便在 KubeSphere 中集成该私有仓库。 -### 步骤 1:进入密钥页面 +### 步骤 1:进入保密字典页面 -以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目,在左侧导航栏中选择**配置中心**下的**密钥**,然后点击**创建**。 - -![open-dashboard](/images/docs/zh-cn/project-user-guide/configurations/image-registries/open-dashboard.png) +以 `project-regular` 用户登录 KubeSphere Web 控制台并进入项目,在左侧导航栏中选择**配置**下的**保密字典**,然后点击**创建**。 ### 步骤 2:配置基本信息 -设置密钥的名称(例如 `demo-registry-secret`),然后点击**下一步**。 +设置保密字典的名称(例如 `demo-registry-secret`),然后点击**下一步**。 {{< notice tip >}} -您可以在对话框右上角启用**编辑模式**来查看密钥的 YAML 清单文件,并通过直接编辑清单文件来创建密钥。您也可以继续执行后续步骤在控制台上创建密钥。 +您可以在对话框右上角启用**编辑 YAML** 来查看保密字典的 YAML 清单文件,并通过直接编辑清单文件来创建保密字典。您也可以继续执行后续步骤在控制台上创建保密字典。 {{}} -![create-secret](/images/docs/zh-cn/project-user-guide/configurations/image-registries/create-secret.png) - ### 步骤 3:配置镜像仓库信息 -将**类型**设置为 **kubernetes.io/dockerconfigjson(镜像仓库密钥)**。要在创建应用负载时使用私有仓库中的镜像,您需要配置以下字段: +将**类型**设置为 **镜像仓库信息**。要在创建应用负载时使用私有仓库中的镜像,您需要配置以下字段: - **仓库地址**:镜像仓库的地址,其中包含创建应用负载时需要使用的镜像。 - **用户名**:登录镜像仓库所需的用户名。 - **密码**:登录镜像仓库所需的密码。 - **邮箱**(可选):您的邮箱地址。 -![image-registry-info](/images/docs/zh-cn/project-user-guide/configurations/image-registries/image-registry-info.png) - #### 添加 Docker Hub 仓库 -1. 在 [Docker Hub](https://hub.docker.com/) 上添加镜像仓库之前,您需要注册一个 Docker Hub 帐户。在**密钥设置**页面,将**仓库地址**设置为 `docker.io`,将**用户名**和**密码**分别设置为您的 Docker ID 和密码,然后点击**验证**以检查地址是否可用。 +1. 
在 [Docker Hub](https://hub.docker.com/) 上添加镜像仓库之前,您需要注册一个 Docker Hub 帐户。在**保密字典设置**页面,将**仓库地址**设置为 `docker.io`,将**用户名**和**密码**分别设置为您的 Docker ID 和密码,然后点击**验证**以检查地址是否可用。 - ![validate-registry-address](/images/docs/zh-cn/project-user-guide/configurations/image-registries/validate-registry-address.png) - -2. 点击**创建**。密钥创建后会显示在**密钥**界面。有关密钥创建后如何编辑密钥,请参阅[查看密钥详情](../../../project-user-guide/configuration/secrets/#查看密钥详情)。 +2. 点击**创建**。保密字典创建后会显示在**保密字典**界面。有关保密字典创建后如何编辑保密字典,请参阅[查看保密字典详情](../../../project-user-guide/configuration/secrets/#查看保密字典详情)。 #### 添加 Harbor 镜像仓库 @@ -89,9 +81,7 @@ Docker 镜像是一个只读的模板,可用于部署容器服务。每个镜 sudo systemctl restart docker ``` -3. 在 KubeSphere 控制台上进入创建密钥的**密钥设置**页面,将**类型**设置为**镜像仓库密钥**,将**仓库地址**设置为您的 Harbor IP 地址,并设置用户名和密码。 - - ![harbor-address](/images/docs/zh-cn/project-user-guide/configurations/image-registries/harbor-address.png) +3. 在 KubeSphere 控制台上进入创建保密字典的**数据设置**页面,将**类型**设置为**镜像仓库信息**,将**仓库地址**设置为您的 Harbor IP 地址,并设置用户名和密码。 {{< notice note >}} @@ -99,7 +89,7 @@ Docker 镜像是一个只读的模板,可用于部署容器服务。每个镜 {{}} -4. 点击**创建**。密钥创建后会显示在**密钥**页面。有关密钥创建后如何编辑密钥,请参阅[查看密钥详情](../../../project-user-guide/configuration/secrets/#查看密钥详情)。 +4. 
点击**创建**。保密字典创建后会显示在**保密字典**页面。有关保密字典创建后如何编辑保密字典,请参阅[查看保密字典详情](../../../project-user-guide/configuration/secrets/#查看保密字典详情)。 **HTTPS** @@ -107,6 +97,4 @@ Docker 镜像是一个只读的模板,可用于部署容器服务。每个镜 ## 使用镜像仓库 -如果您已提前创建了私有镜像仓库的密钥,您可以选择私有镜像仓库中的镜像。例如,创建[部署](../../../project-user-guide/application-workloads/deployments/)时,您可以在**容器镜像**页面点击**镜像**下拉列表选择一个仓库,然后输入镜像名称和标签使用镜像。 - -![use-image-registry](/images/docs/zh-cn/project-user-guide/configurations/image-registries/use-image-registry.png) \ No newline at end of file +如果您已提前创建了私有镜像仓库的保密字典,您可以选择私有镜像仓库中的镜像。例如,创建[部署](../../../project-user-guide/application-workloads/deployments/)时,您可以在**添加容器**页面点击**镜像**下拉列表选择一个仓库,然后输入镜像名称和标签使用镜像。 diff --git a/content/zh/docs/project-user-guide/configuration/secrets.md b/content/zh/docs/project-user-guide/configuration/secrets.md index 57cb0fe0d..142dd39cd 100644 --- a/content/zh/docs/project-user-guide/configuration/secrets.md +++ b/content/zh/docs/project-user-guide/configuration/secrets.md @@ -1,91 +1,71 @@ --- -title: "密钥" -keywords: 'KubeSphere, Kubernetes, 密钥' -description: '了解如何在 KubeSphere 中创建密钥。' -linkTitle: "密钥" +title: "保密字典" +keywords: 'KubeSphere, Kubernetes, 保密字典' +description: '了解如何在 KubeSphere 中创建保密字典。' +linkTitle: "保密字典" weight: 10410 --- -Kubernetes [密钥 (Secret)](https://kubernetes.io/zh/docs/concepts/configuration/secret/) 可用于存储和管理密码、OAuth 令牌和 SSH 密钥等敏感信息。Pod 可以通过[三种方式](https://kubernetes.io/zh/docs/concepts/configuration/secret/#overview-of-secrets)使用密钥: +Kubernetes [保密字典 (Secret)](https://kubernetes.io/zh/docs/concepts/configuration/secret/) 可用于存储和管理密码、OAuth 令牌和 SSH 密钥等敏感信息。容器组可以通过[三种方式](https://kubernetes.io/zh/docs/concepts/configuration/secret/#overview-of-secrets)使用保密字典: -- 作为挂载到 Pod 中容器化应用上的卷中的文件。 -- 作为 Pod 中容器使用的环境变量。 -- 作为 kubelet 为 Pod 拉取镜像时的镜像仓库凭证。 +- 作为挂载到容器组中容器化应用上的卷中的文件。 +- 作为容器组中容器使用的环境变量。 +- 作为 kubelet 为容器组拉取镜像时的镜像仓库凭证。 -本教程演示如何在 KubeSphere 中创建密钥。 +本教程演示如何在 KubeSphere 中创建保密字典。 ## 准备工作 -您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 
角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 -## 创建密钥 +## 创建保密字典 -### 步骤 1:进入密钥页面 +### 步骤 1:进入保密字典页面 -以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置中心**下的**密钥**,然后点击**创建**。 - -![create-secrets](/images/docs/zh-cn/project-user-guide/configurations/secrets/create-secrets.png) +以 `project-regular` 用户登录控制台并进入项目,在左侧导航栏中选择**配置**下的**保密字典**,然后点击**创建**。 ### 步骤 2:配置基本信息 -设置密钥的名称(例如 `demo-secret`),然后点击**下一步**。 +设置保密字典的名称(例如 `demo-secret`),然后点击**下一步**。 {{< notice tip >}} -您可以在对话框右上角启用**编辑模式**来查看密钥的 YAML 清单文件,并通过直接编辑清单文件来创建密钥。您也可以继续执行后续步骤在控制台上创建密钥。 +您可以在对话框右上角启用**编辑 YAML** 来查看保密字典的 YAML 清单文件,并通过直接编辑清单文件来创建保密字典。您也可以继续执行后续步骤在控制台上创建保密字典。 {{}} -![set-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/set-secret.png) +### 步骤 3:设置保密字典 -### 步骤 3:设置密钥 - -1. 在**密钥设置**选项卡,从**类型**下拉列表中选择密钥类型。您可以在 KubeSphere 中创建以下密钥,类型对应 YAML 文件中的 `type` 字段。 - - ![secret-type](/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-type.png) +1. 
在**数据设置**选项卡,从**类型**下拉列表中选择保密字典类型。您可以在 KubeSphere 中创建以下保密字典,类型对应 YAML 文件中的 `type` 字段。 {{< notice note >}} - 对于所有的密钥类型,配置在清单文件中 `data` 字段的所有键值对的值都必须是 base64 编码的字符串。KubeSphere 会自动将您在控制台上配置的值转换成 base64 编码并保存到 YAML 文件中。例如,密钥类型为**默认**时,如果您在**添加数据**页面将**键**和**值**分别设置为 `password` 和 `hello123`,YAML 文件中显示的实际值为 `aGVsbG8xMjM=`(即 `hello123` 的 base64 编码,由 KubeSphere 自动转换)。 + 对于所有的保密字典类型,配置在清单文件中 `data` 字段的所有键值对的值都必须是 base64 编码的字符串。KubeSphere 会自动将您在控制台上配置的值转换成 base64 编码并保存到 YAML 文件中。例如,保密字典类型为**默认**时,如果您在**添加数据**页面将**键**和**值**分别设置为 `password` 和 `hello123`,YAML 文件中显示的实际值为 `aGVsbG8xMjM=`(即 `hello123` 的 base64 编码,由 KubeSphere 自动转换)。 {{}} - - **Opaque(默认)**:对应 Kubernetes 的 [Opaque](https://kubernetes.io/zh/docs/concepts/configuration/secret/#opaque-secret) 密钥类型,同时也是 Kubernetes 的默认密钥类型。您可以用此类型密钥创建任意自定义数据。点击**添加数据**为其添加键值对。 + - **默认**:对应 Kubernetes 的 [Opaque](https://kubernetes.io/zh/docs/concepts/configuration/secret/#opaque-secret) 保密字典类型,同时也是 Kubernetes 的默认保密字典类型。您可以用此类型保密字典创建任意自定义数据。点击**添加数据**为其添加键值对。 - ![default-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/default-secret.png) + - **TLS 信息**:对应 Kubernetes 的 [kubernetes.io/tls](https://kubernetes.io/zh/docs/concepts/configuration/secret/#tls-secret) 保密字典类型,用于存储证书及其相关保密字典。这类数据通常用于 TLS 场景,例如提供给应用路由 (Ingress) 资源用于终结 TLS 链接。使用此类型的保密字典时,您必须为其指定**凭证**和**私钥**,分别对应 YAML 文件中的 `tls.crt` 和 `tls.key` 字段。 - - **kubernetes.io/tls (TLS)**:对应 Kubernetes 的 [kubernetes.io/tls](https://kubernetes.io/zh/docs/concepts/configuration/secret/#tls-secret) 密钥类型,用于存储证书及其相关密钥。这类数据通常用于 TLS 场景,例如提供给应用路由 (Ingress) 资源用于终结 TLS 链接。使用此类型的密钥时,您必须为其指定**凭证**和**私钥**,分别对应 YAML 文件中的 `tls.crt` 和 `tls.key` 字段。 + - **镜像仓库信息**:对应 Kubernetes 的 [kubernetes.io/dockerconfigjson](https://kubernetes.io/zh/docs/concepts/configuration/secret/#docker-config-secrets) 保密字典类型,用于存储访问 Docker 镜像仓库所需的凭证。有关更多信息,请参阅[镜像仓库](../image-registry/)。 - ![tls](/images/docs/zh-cn/project-user-guide/configurations/secrets/tls.png) + - **用户名和密码**:对应 Kubernetes 的 
[kubernetes.io/basic-auth](https://kubernetes.io/zh/docs/concepts/configuration/secret/#basic-authentication-secret) 保密字典类型,用于存储基本身份认证所需的凭证。使用此类型的保密字典时,您必须为其指定**用户名**和**密码**,分别对应 YAML 文件中的 `username` 和 `password` 字段。 - - **kubernetes.io/dockerconfigjson(镜像仓库密钥)**:对应 Kubernetes 的 [kubernetes.io/dockerconfigjson](https://kubernetes.io/zh/docs/concepts/configuration/secret/#docker-config-secrets) 密钥类型,用于存储访问 Docker 镜像仓库所需的凭证。有关更多信息,请参阅[镜像仓库](../image-registry/)。 +2. 本教程以默认类型为例。点击**添加数据**,将**键**设置为 `MYSQL_ROOT_PASSWORD` 并将**值**设置为 `123456`,为 MySQL 设置保密字典。 - ![image-registry-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/image-registry-secret.png) +3. 点击对话框右下角的 **√** 以确认配置。您可以继续为保密字典添加键值对或点击**创建**完成操作。有关保密字典使用的更多信息,请参阅[创建并发布 WordPress](../../../quick-start/wordpress-deployment/#任务-3创建应用程序)。 - - **kubernetes.io/basic-auth(帐户密码密钥)**:对应 Kubernetes 的 [kubernetes.io/basic-auth](https://kubernetes.io/zh/docs/concepts/configuration/secret/#basic-authentication-secret) 密钥类型,用于存储基本身份认证所需的凭证。使用此类型的密钥时,您必须为其指定**用户名**和**密码**,分别对应 YAML 文件中的 `username` 和 `password` 字段。 +## 查看保密字典详情 - ![account-password-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/account-password-secret.png) +1. 保密字典创建后会显示在如图所示的列表中。您可以点击右边的 ,并从下拉菜单中选择操作来修改保密字典。 -2. 本教程以默认类型为例。点击**添加数据**,将**键 (Key)** 设置为 `MYSQL_ROOT_PASSWORD` 并将**值 (Value)** 设置为 `123456`,为 MySQL 设置密钥。 + - **编辑信息**:查看和编辑基本信息。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑设置**:修改保密字典键值对。 + - **删除**:删除保密字典。 - ![enter-key](/images/docs/zh-cn/project-user-guide/configurations/secrets/enter-key.png) - -3. 点击对话框右下角的 **√** 以确认配置。您可以继续为密钥添加键值对或点击**创建**完成操作。有关密钥使用的更多信息,请参阅[创建并发布 WordPress](../../../quick-start/wordpress-deployment/#任务-3创建应用程序)。 - -## 查看密钥详情 - -1. 密钥创建后会显示在如图所示的列表中。您可以点击右边的 ,并从下拉菜单中选择操作来修改密钥。 - - ![secret-list](/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-list.png) - - - **编辑**:查看和编辑基本信息。 - - **编辑配置文件**:查看、上传、下载或更新 YAML 文件。 - - **编辑密钥**:修改密钥键值对。 - - **删除**:删除密钥。 - -2. 
点击密钥名称打开密钥详情页面。在**详情**选项卡,您可以查看密钥的所有键值对。 - - ![secret-detail-page](/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-detail-page.png) +2. 点击保密字典名称打开保密字典详情页面。在**数据**选项卡,您可以查看保密字典的所有键值对。 {{< notice note >}} @@ -93,36 +73,30 @@ Kubernetes [密钥 (Secret)](https://kubernetes.io/zh/docs/concepts/configuratio {{}} -3. 点击**更多操作**对密钥进行其他操作。 +3. 点击**更多操作**对保密字典进行其他操作。 - ![secret-dropdown-menu](/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-dropdown-menu.png) - - - **编辑配置文件**:查看、上传、下载或更新 YAML 文件。 - - **编辑密钥**:修改密钥键值对。 - - **删除**:删除密钥并返回密钥列表页面。 + - **编辑 YAML**:查看、上传、下载或更新 YAML 文件。 + - **编辑保密字典**:修改保密字典键值对。 + - **删除**:删除保密字典并返回保密字典列表页面。 -## 使用密钥 +## 使用保密字典 -通常情况下,在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您需要使用密钥。例如,您可以为代码仓库选择密钥。有关更多信息,请参阅[镜像仓库](../image-registry/)。 +通常情况下,在创建工作负载、[服务](../../../project-user-guide/application-workloads/services/)、[任务](../../../project-user-guide/application-workloads/jobs/)或[定时任务](../../../project-user-guide/application-workloads/cronjobs/)时,您需要使用保密字典。例如,您可以为代码仓库选择保密字典。有关更多信息,请参阅[镜像仓库](../image-registry/)。 -![use-secret-repository](/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-repository.png) +此外,您还可以用保密字典为容器添加环境变量。您可以在**容器镜像**页面勾选**环境变量**,点击**引用配置文件或保密字典**,然后从下拉列表中选择一个保密字典。 -此外,您还可以用密钥为容器添加环境变量。您可以在**容器镜像**页面勾选**环境变量**,点击**引用配置文件或密钥**,然后从下拉列表中选择一个密钥。 +## 创建常用保密字典 -![use-secret-image](/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-image.png) +本节介绍如何为 Docker Hub 帐户和 GitHub 帐户创建保密字典。 -## 创建常用密钥 +### 创建 Docker Hub 保密字典 -本节介绍如何为 Docker Hub 帐户和 GitHub 帐户创建密钥。 +1. 以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置**下的**保密字典**,然后在页面右侧点击**创建**。 -### 创建 Docker Hub 密钥 +2. 设置保密字典名称(例如 `dockerhub-id`)并点击**下一步**。在**数据设置**页面,设置以下参数,然后点击**验证**以检查设置的信息是否有效。 -1. 
以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置中心**下的**密钥**,然后在页面右侧点击**创建**。 - -2. 设置密钥名称(例如 `dockerhub-id`)并点击**下一步**。在**密钥设置**页面,设置以下参数,然后点击**验证**以检查设置的信息是否有效。 - - **类型**:选择**kubernetes.io/dockerconfigjson(镜像仓库密钥)**。 + **类型**:选择**镜像仓库信息**。 **仓库地址**:输入您的 Docker Hub 仓库地址,例如 `docker.io`。 @@ -130,22 +104,18 @@ Kubernetes [密钥 (Secret)](https://kubernetes.io/zh/docs/concepts/configuratio **密码**:输入您的 Docker Hub 密码。 - ![docker-hub-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/docker-hub-secret.png) - 3. 点击**创建**完成操作。 -### 创建 GitHub 密钥 +### 创建 GitHub 保密字典 -1. 以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置中心**下的**密钥**,然后在页面右侧点击**创建**。 +1. 以 `project-regular` 用户登录 KubeSphere 并进入您的项目。在左侧导航栏中选择**配置**下的**保密字典**,然后在页面右侧点击**创建**。 -2. 设置密钥名称(例如 `github-id`)并点击**下一步**。在**密钥设置**页面,设置以下参数。 +2. 设置保密字典名称(例如 `github-id`)并点击**下一步**。在**数据设置**页面,设置以下参数。 - **类型**:选择**kubernetes.io/basic-auth(帐户密码密钥)**。 + **类型**:选择**用户名和密码**。 **用户名**:输入您的 GitHub 帐户。 **密码**:输入您的 GitHub 密码。 - ![github-secret](/images/docs/zh-cn/project-user-guide/configurations/secrets/github-secret.png) - 3. 点击**创建**完成操作。 \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/configuration/serviceaccounts.md b/content/zh/docs/project-user-guide/configuration/serviceaccounts.md new file mode 100644 index 000000000..d05ffb2a9 --- /dev/null +++ b/content/zh/docs/project-user-guide/configuration/serviceaccounts.md @@ -0,0 +1,48 @@ +--- +title: "Service Accounts" +keywords: 'KubeSphere, Kubernetes, Service Accounts' +description: 'Learn how to create service accounts on KubeSphere.' +linkTitle: "Service Accounts" +weight: 10440 +--- + +A [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) provides an identity for processes that run in a Pod. When accessing a cluster, a user is authenticated by the API server as a particular user account. 
Processes in containers inside Pods are authenticated as a particular service account when these processes contact the API server. + +This document describes how to create service accounts on KubeSphere. + +## Prerequisites + +You need to create a workspace, a project, and a user (`project-regular`), and invite the user to the project and assign it the `operator` role. For more information, see [Create Workspaces, Projects, Users and Roles](../../../quick-start/create-workspace-and-project/). + +## Create Service Account + +### Step 1: Log in to KubeSphere + +1. Log in to the KubeSphere console as `project-regular`. Go to **Configuration** of a project and click **Service Accounts**. A service account named `default` is displayed on the **Service Accounts** page as it is automatically created when the project is created. + + {{< notice note >}} + + If no service account is specified when creating workloads in a project, the service account `default` in the same project is automatically assigned. + + {{}} + +2. Click **Create**. + +### Step 2: Set a service account + +1. In the displayed dialog box, set the following parameters: + - **Name**: A unique identifier for the service account. + - **Alias**: An alias for the service account to help you better identify the service account. + - **Description**: A brief introduction of the service account. + - **Project Role**: Select a project role from the drop-down list for the service account. Different project roles have [different permissions](../../../project-administration/role-and-member-management/#built-in-roles) in a project. +2. Click **Create** after you finish setting the parameters. The service account created is displayed on the **Service Accounts** page. + +## Service Account Details Page + +1. Click the service account created to go to its details page. +2. Click **Edit Information** to edit its basic information, or click **More** to select an operation from the drop-down menu. 
+ - **Edit YAML**: View, update, or download the YAML file. + - **Change Role**: Change the project role of the service account. + - **Delete**: Delete the service account and return to the previous page. +3. On the **Resource Status** tab, details about the corresponding Secret and the kubeconfig of the service account are displayed. + diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md b/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md index 3ba73bcd2..cb0c6689c 100644 --- a/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql.md @@ -12,7 +12,7 @@ weight: 10812 ## 准备工作 - 请确保已[启用应用商店](../../../../pluggable-components/app-store/)。MySQL 和 MySQL Exporter 将通过应用商店来部署。 -- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`)。该帐户需要在该项目中具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。该用户需要在该项目中具有 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。 ## 步骤 1:部署 MySQL @@ -20,7 +20,7 @@ weight: 10812 1. 前往您的项目,点击左上角的**应用商店**。 -2. 点击 **MySQL** 进入其产品详情页面,点击**应用信息**选项卡中的**部署**。 +2. 点击 **MySQL** 进入其详情页面,点击**应用信息**选项卡中的**部署**。 {{< notice note >}} @@ -28,25 +28,21 @@ MySQL 是 KubeSphere 应用商店中的内置应用,应用商店启用后可 {{}} -3. 在**基本信息**下,设置**应用名称**并选择**应用版本**。在**部署位置**下,选择要部署该应用的项目,然后点击**下一步**。 +3. 在**基本信息**下,设置**名称**并选择**版本**。在**位置**下,选择要部署该应用的项目,然后点击**下一步**。 -4. 在**应用配置**下,取消 `mysqlRootPassword` 字段的注解,并设置 root 密码,然后点击**部署**。 - - ![mysql-root-password](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png) +4. 在**应用设置**下,取消 `mysqlRootPassword` 字段的注解,并设置 root 密码,然后点击**安装**。 5. 
等待 MySQL 启动并运行。 - ![mysql-ready](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png) - ## 步骤 2:部署 MySQL Exporter 您需要在同一个集群上的同一个项目中部署 MySQL Exporter。MySQL Exporter 负责查询 MySQL 状态并以 Prometheus 格式报告数据。 1. 前往**应用商店**,点击 **MySQL Exporter**。 -2. 在产品详情页面,点击**部署**。 +2. 在详情页面,点击**安装**。 -3. 在**基本信息**下,设置**应用名称**并选择**应用版本**。在**部署位置**下,选择要部署该应用的项目(须和部署 MySQL 的项目相同),然后点击**下一步**。 +3. 在**基本信息**下,设置**名称**并选择**版本**。在**位置**下,选择要部署该应用的项目(须和部署 MySQL 的项目相同),然后点击**下一步**。 4. 请确保 `serviceMonitor.enabled` 设为 `true`。内置 MySQL Exporter 默认将其设置为 `true`,故您无需手动修改 `serviceMonitor.enabled`。 @@ -54,28 +50,20 @@ MySQL 是 KubeSphere 应用商店中的内置应用,应用商店启用后可 如果您使用外部 Exporter 的 Helm Chart,请务必启用 ServiceMonitor CRD。此类 Chart 通常默认禁用 ServiceMonitor,需要手动修改。 {{}} -5. 修改 MySQL 连接参数。MySQL Exporter 需要连接到目标 MySQL。在本教程中,MySQL 以服务名 `mysql-dh3ily` 进行安装。在配置文件的 `mysql` 部分,将 `host` 设置为 `mysql-dh3ily`,`pass` 设置为 `testing`, `user` 设置为 `root`,如下所示。请注意,您 MySQL 服务的**名称可能不同**。 - - ![mysql-exporter-configurations](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-configurations.png) - - 点击**部署**。 +5. 修改 MySQL 连接参数。MySQL Exporter 需要连接到目标 MySQL。在本教程中,MySQL 以服务名 `mysql-dh3ily` 进行安装。在配置文件的 `mysql` 部分,将 `host` 设置为 `mysql-dh3ily`,`pass` 设置为 `testing`, `user` 设置为 `root`,如下所示。请注意,您 MySQL 服务的**名称可能不同**。编辑完成后,点击**安装**。 6. 等待 MySQL Exporter 启动并运行。 - ![mysql-exporter-ready](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png) - ## 步骤 3:创建监控面板 您可以为 MySQL 创建监控面板,并将指标实时可视化。 1. 在同一项目中,选择侧边栏中**监控告警**下的**自定义监控**,点击**创建**。 -2. 在出现的对话框中,为监控面板设置名称(例如,`mysql-overview`)并选择 MySQL 模板。点击**下一步**继续。 +2. 在弹出的对话框中,为监控面板设置名称(例如,`mysql-overview`)并选择 MySQL 模板。点击**下一步**继续。 3. 
点击右上角的**保存模板**保存该模板。新创建的监控面板会显示在**自定义监控面板**页面。 - ![mysql-monitoring-dashboard](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-monitoring-dashboard.png) - {{< notice note >}} - 内置 MySQL 模板由 KubeSphere 提供,以便您监控 MySQL 的各项指标。您也可以按需在监控面板上添加更多指标。 diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md b/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md index 99f63b767..5e81bb3c6 100644 --- a/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-web.md @@ -11,7 +11,7 @@ weight: 10813 ## 准备工作 - 请确保[已启用 OpenPitrix 系统](../../../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间、一个项目和一个帐户。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../../quick-start/create-workspace-and-project/)。该帐户需要是平台普通用户,邀请至该企业空间中并赋予 `self-provisioner` 角色。故请创建一个 `workspace-self-provisioner` 帐户,赋予 `self-provisioner` 角色,并使用该帐户创建一个项目(例如 `test`)。在本教程中,您以 `workspace-self-provisioner` 身份登录控制台,并在 `demo-workspace` 企业空间的 `test` 项目中进行操作。 +- 您需要创建一个企业空间、一个项目和一个用户。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../../quick-start/create-workspace-and-project/)。该用户需要是平台普通用户,邀请至该企业空间中并赋予 `self-provisioner` 角色。故请创建一个 `workspace-self-provisioner` 用户,赋予 `self-provisioner` 角色,并使用该用户创建一个项目(例如 `test`)。在本教程中,您以 `workspace-self-provisioner` 身份登录控制台,并在 `demo-workspace` 企业空间的 `test` 项目中进行操作。 - 了解 Helm Chart 和 [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/)。 @@ -33,23 +33,9 @@ weight: 10813 ### 步骤 3:上传 Helm Chart -1. 在 `demo-workspace` 企业空间的**概览**页面上转到**应用模板**。 +1. 在 `demo-workspace` 企业空间的**概览**页面上转到**应用管理**下的**应用模板**。 - ![创建应用模板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/app-template-create.PNG) - -2. 
点击**创建**,上传 `prometheus-example-app-0.1.0.tgz` 作为镜像,如下所示。 - - ![click-create-app-template](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-create-app-template.PNG) - - ![click-upload-app-template](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template.PNG) - - ![click-upload-app-template-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-2.PNG) - - ![click-upload-app-template-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-4.PNG) - - ![click-upload-app-template-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-5.PNG) - - ![click-upload-app-template-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-6.PNG) +2. 点击**创建**,上传 `prometheus-example-app-0.1.0.tgz`。 ### 步骤 4:部署示例 Web 应用程序 @@ -57,62 +43,30 @@ weight: 10813 1. 点击 `prometheus-example-app`。 - ![部署示例 Web 应用-1](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-1.PNG) - -2. 展开菜单,点击**测试部署**。 - - ![部署示例 Web 应用-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-2.PNG) - - ![部署示例 Web 应用-3](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-3.PNG) +2. 展开菜单,点击**安装**。 3. 请确保将示例 Web 应用程序部署至 `test` 项目,点击**下一步**。 - ![部署示例 Web 应用-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-4.PNG) - -4. 
请确保将 `serviceMonitor.enabled` 设置为 `true`,点击**部署**。 - - ![部署示例 Web 应用-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-5.PNG) - - ![部署示例 Web 应用-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-6.PNG) +4. 请确保将 `serviceMonitor.enabled` 设置为 `true`,点击**安装**。 5. 在 `test` 项目的**工作负载**下,稍等片刻待示例 Web 应用程序启动并运行。 - ![创建仪表板-1](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-1.PNG) - ### 步骤 5:创建监控面板 该部分演示如何从零创建监控面板。您需要创建一个显示已处理操作总数的文本图表和一个显示操作率的折线图。 1. 转到**自定义监控**,点击**创建**。 - ![创建仪表板-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-2.PNG) - -2. 设置名称(例如 `sample-web`),点击**创建**。 - - ![创建仪表板-3](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-3.PNG) +2. 设置名称(例如 `sample-web`),点击**下一步**。 3. 在左上角输入标题(例如 `示例 Web 概览`)。 - ![创建仪表板-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-4.PNG) - -4. 点击左列的**加号图标**,创建文本图表。 - - ![创建仪表板-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-5.PNG) +4. 点击左列的 ,创建文本图表。 5. 在**监控指标**字段输入 PromQL 表达式 `myapp_processed_ops_total`,并设置图表名称(例如 `操作数`)。点击右下角的 **√** 继续。 - ![创建仪表板-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-6.PNG) +6. 点击**添加监控项**,选择**折线图**,然后点击**确认**。 -6. 点击**添加监控项**,创建折线图。 - - ![创建仪表板-7](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-7.PNG) - - ![创建仪表板-8](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-8.PNG) - -7. 
在**监控指标**中输入 PromQL 表达式 `irate(myapp_processed_ops_total[3m])` 并将图表命名为 `操作率`。要改进外观,可以将**图例名称**设置为 `{{service}}`。它会用图例标签 `service` 的值命名每一段折线。然后将**精确位**设置为 `2`,以便将结果保留两位小数。 - - ![创建仪表板-9](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-9.PNG) +7. 在**监控指标**中输入 PromQL 表达式 `irate(myapp_processed_ops_total[3m])` 并将图表命名为 `操作率`。要改进外观,可以将**图例名称**设置为 `{{service}}`。它会用图例标签 `service` 的值命名每一段折线。然后将**精确位**设置为 `2`,以便将结果保留两位小数。点击右下角的 **√** 继续。 8. 点击**保存模板**进行保存。 - - ![创建仪表板-10](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-10.PNG) \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/introduction.md b/content/zh/docs/project-user-guide/custom-application-monitoring/introduction.md index efd72a5a2..927154800 100644 --- a/content/zh/docs/project-user-guide/custom-application-monitoring/introduction.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/introduction.md @@ -21,7 +21,7 @@ KubeSphere 的监控引擎基于 Prometheus 和 Prometheus Operator。总体而 #### 直接暴露 -直接暴露 Prometheus 格式的应用指标是云原生应用的常用方式。这种方式需要开发者在代码中导入 Prometheus 客户端库并在特定的端点 (Endpoint) 暴露指标。许多应用,例如 ETCD、CoreDNS 和 Istio,都采用这种方式。 +直接暴露 Prometheus 格式的应用指标是云原生应用的常用方式。这种方式需要开发者在代码中导入 Prometheus 客户端库并在特定的端点 (Endpoint) 暴露指标。许多应用,例如 etcd、CoreDNS 和 Istio,都采用这种方式。 Prometheus 社区为大多数编程语言提供了客户端库。您可以在 [Prometheus Client Libraries](https://prometheus.io/docs/instrumenting/clientlibs/) 页面查看支持的语言。使用 Go 语言的开发者可参阅 [Instrumenting a Go Application for Prometheus](https://prometheus.io/docs/guides/go-application/) 了解如何编写符合 Prometheus 规范的应用程序。 diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/overview.md b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/overview.md index 590cd1894..e51994f98 100644 --- 
a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/overview.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/overview.md @@ -16,8 +16,6 @@ weight: 10815 KubeSphere 自定义监控面板可以视作为一个 YAML 配置文件。数据模型主要基于 [Grafana](https://github.com/grafana/grafana)(一个用于监控和可观测性的开源工具)创建,您可以在 [kubesphere/monitoring-dashboard](https://github.com/kubesphere/monitoring-dashboard) 中找到 KubeSphere 监控面板数据模型的设计。该配置文件便捷,可进行分享,欢迎您通过 [Monitoring Dashboards Gallery](https://github.com/kubesphere/monitoring-dashboard/tree/master/contrib/gallery) 对 KubeSphere 社区贡献面板模板。 -![监控面板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png) - ### 使用内置模板 KubeSphere 为 MySQL、Elasticsearch 和 Redis 提供内置模板方便您快速创建监控面板。如果您想使用内置模板,请选择一种并点击**下一步**。 @@ -28,9 +26,7 @@ KubeSphere 为 MySQL、Elasticsearch 和 Redis 提供内置模板方便您快速 ### 使用 YAML 文件 -打开右上角的**编辑模式**并粘贴您的面板 YAML 文件。 - -![面板配置文件](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-yaml.png) +打开右上角的**编辑 YAML** 并粘贴您的面板 YAML 文件。 ## 面板布局 @@ -40,25 +36,17 @@ KubeSphere 为 MySQL、Elasticsearch 和 Redis 提供内置模板方便您快速 在顶部栏中,您可以配置以下设置:名称、主题、时间范围和刷新间隔。 -![顶部栏](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-dashboard.png) - ### 文本图表栏 您可以在最左侧栏中添加新的文本图表。 -![左侧文本栏](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-text-chart.png) - ### 图表显示栏 您可以在中间栏中查看图表。 -![中间栏](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/middle-column.png) - ### 详情栏 -您可以在最右侧栏中查看图表详情,包括一段时间内指标的 **max**, **min**, **avg** 和 **last** 等数值。 - -![右侧详情栏](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/chart-detail.png) +您可以在最右侧栏中查看图表详情,包括一段时间内指标的 **max**、**min**、**avg** 和 **last** 等数值。 ## 编辑监控面板 @@ -68,8 +56,6 @@ KubeSphere 为 MySQL、Elasticsearch 和 Redis 提供内置模板方便您快速 若要添加文本图表,点击左侧栏中的 
。若要在中间栏添加图表,点击右下角的**添加监控项**。 -![编辑监控面板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-items.png) - ### 添加监控组 若要将监控项分组,您可以点击 将右侧的项目拖放至目标组。若要添加新的分组,点击**添加监控组**。如果您想修改监控组的位置,请将鼠标悬停至监控组上并点击右侧的 。 diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/panel.md b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/panel.md index b92e9dc0b..457b7b14d 100644 --- a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/panel.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/panel.md @@ -15,15 +15,14 @@ KubeSphere 当前支持两种图表:文本图表和图形图表。 - **图表名称**:该文本图表的名称。 - **单位**:指标数据的单位。 - **精确位**:支持整数。 -- **监控指标**:包含可用的 Prometheus 指标。 - -![文本图表](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png) +- **监控指标**:从包含可用 Prometheus 指标的下拉列表中指定一个监控指标。 ## 图形图表 图形图表适合显示多个指标的数值。图形图表的编辑窗口包括三部分,上半部分显示指标的实时数值,左侧栏用于设置图表主题,右侧栏用于编辑指标和图表描述。 -- **图表类型**:支持折线图和堆叠图。 +- **图表类型**:支持折线图和柱状图。 +- **图例类型**:支持基础图和堆叠图。 - **图表配色**:修改图表各个指标的颜色。 - **图表名称**:图表的名称。 - **描述信息**:图表描述。 @@ -33,5 +32,3 @@ KubeSphere 当前支持两种图表:文本图表和图形图表。 - **监控指标**:包含可用的 Prometheus 指标。 - **单位**:指标数据的单位。 - **精确位**:支持整数。 - -![图形图表](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png) \ No newline at end of file diff --git a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/querying.md b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/querying.md index 4ce4af810..1a432ee3b 100644 --- a/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/querying.md +++ b/content/zh/docs/project-user-guide/custom-application-monitoring/visualization/querying.md @@ -6,7 +6,7 @@ linkTitle: "查询" weight: 10817 --- -在查询编辑器中,您可以输入 PromQL 表达式以处理和获取指标。若要了解如何编写 PromQL,请参阅 [Query 
Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/)。 +在查询编辑器中,在**监控指标**中输入 PromQL 表达式以处理和获取指标。若要了解如何编写 PromQL,请参阅 [Query Examples](https://prometheus.io/docs/prometheus/latest/querying/examples/)。 ![查询编辑器-文本图表](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/querying/text-chart-edit.png) diff --git a/content/zh/docs/project-user-guide/grayscale-release/blue-green-deployment.md b/content/zh/docs/project-user-guide/grayscale-release/blue-green-deployment.md index 45dd45e47..2a62197fa 100644 --- a/content/zh/docs/project-user-guide/grayscale-release/blue-green-deployment.md +++ b/content/zh/docs/project-user-guide/grayscale-release/blue-green-deployment.md @@ -16,41 +16,27 @@ weight: 10520 ## 准备工作 - 您需要启用 [KubeSphere 服务网格](../../../pluggable-components/service-mesh/)。 -- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),务必邀请该帐户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),务必邀请该用户到项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要启用**应用治理**并有一个可用应用,以便您可以实现该应用的蓝绿部署。本教程使用示例应用 Bookinfo。有关更多信息,请参见[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 ## 创建蓝绿部署任务 -1. 以 `project-regular` 身份登录 KubeSphere,前往**灰度发布**页面,在**灰度策略**选项卡下,点击**蓝绿部署**右侧的**发布任务**。 +1. 以 `project-regular` 身份登录 KubeSphere,前往**灰度发布**页面,在**发布模式**选项卡下,点击**蓝绿部署**右侧的**创建**。 2. 输入名称然后点击**下一步**。 -3. 在**灰度组件**选项卡,从下拉列表选择您的应用以及想实现蓝绿部署的服务。如果您也使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 +3. 在**服务设置**选项卡,从下拉列表选择您的应用以及想实现蓝绿部署的服务。如果您也使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 -4. 如下图所示,在**灰度版本**选项卡,添加另一个版本(例如 `v2`),然后点击**下一步**: +4. 在**新版本设置**选项卡,添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`),然后点击**下一步**。 - ![blue-green-4](/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.PNG) - - {{< notice note >}} - - 截图中的镜像版本为 `v2`。 - - {{}} - -5. 
在**策略配置**选项卡,要让应用版本 `v2` 接管所有流量,请选择**接管所有流量**,然后点击**创建**。 +5. 在**策略设置**选项卡,要让应用版本 `v2` 接管所有流量,请选择**接管**,然后点击**创建**。 6. 蓝绿部署任务创建后,会显示在**任务状态**选项卡下。点击可查看详情。 - ![blue-green-任务列表](/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.PNG) - -7. 稍等片刻后,您可以看到所有流量都流向 `v2` 版本: - - ![blue-green-6](/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.PNG) +7. 稍等片刻后,您可以看到所有流量都流向 `v2` 版本。 8. 新的**部署**也已创建。 - ![版本2-部署](/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.PNG) - 9. 您可以执行以下命令直接获取虚拟服务来查看权重: ```bash @@ -84,7 +70,5 @@ weight: 10520 ## 下线任务 -待您实现蓝绿部署并且结果满足您的预期,您可以点击**任务下线**来移除 `v1` 版本,从而下线任务。 - -![blue-green-7](/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.PNG) +待您实现蓝绿部署并且结果满足您的预期,您可以点击**删除**来移除 `v1` 版本,从而下线任务。 diff --git a/content/zh/docs/project-user-guide/grayscale-release/canary-release.md b/content/zh/docs/project-user-guide/grayscale-release/canary-release.md index 80e2bdb05..a9db571f3 100644 --- a/content/zh/docs/project-user-guide/grayscale-release/canary-release.md +++ b/content/zh/docs/project-user-guide/grayscale-release/canary-release.md @@ -22,30 +22,20 @@ KubeSphere 基于 [Istio](https://istio.io/) 向用户提供部署金丝雀服 - 您需要启用 [KubeSphere 服务网格](../../../pluggable-components/service-mesh/)。 - 您需要启用 [KubeSphere 日志系统](../../../pluggable-components/logging/)以使用 Tracing 功能。 -- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`)。请务必邀请该帐户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`)。请务必邀请该用户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要开启**应用治理**并有一个可用应用,以便实现该应用的金丝雀发布。本教程中使用的示例应用是 Bookinfo。有关更多信息,请参见[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 ## 步骤 1:创建金丝雀发布任务 -1. 
以 `project-regular` 身份登录 KubeSphere 控制台,转到**灰度发布**页面,在**灰度策略**选项卡下,点击**金丝雀发布**右侧的**发布任务**。 +1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到**灰度发布**页面,在**发布模式**选项卡下,点击**金丝雀发布**右侧的**创建**。 2. 设置任务名称,点击**下一步**。 -3. 在**灰度组件**选项卡,从下拉列表中选择您的应用和要实现金丝雀发布的服务。如果您同样使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 +3. 在**服务设置**选项卡,从下拉列表中选择您的应用和要实现金丝雀发布的服务。如果您同样使用示例应用 Bookinfo,请选择 **reviews** 并点击**下一步**。 -4. 在**灰度版本**选项卡,添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.13.0`;将 `v1` 改为 `v2`)并点击**下一步**,如下图所示: +4. 在**新版本设置**选项卡,添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`;将 `v1` 改为 `v2`)并点击**下一步**。 - ![canary-release-4](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-4.PNG) - - {{< notice note >}} - - 请注意截图中的镜像版本是 `v2`。 - - {{}} - -5. 您可以使用具体比例或者使用请求内容(例如 `Http Header`、`Cookie` 和 `URI`)分别向这两个版本(`v1` 和 `v2`)发送流量。选择**按流量比例下发**,并拖动中间的滑块来更改向这两个版本分别发送的流量比例(例如设置为各 50%)。操作完成后,点击**创建**。 - - ![canary-release-5](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-5.gif) +5. 您可以使用具体比例或者使用请求内容(例如 `Http Header`、`Cookie` 和 `URI`)分别向这两个版本(`v1` 和 `v2`)发送流量。选择**指定流量分配**,并拖动中间的滑块来更改向这两个版本分别发送的流量比例(例如设置为各 50%)。操作完成后,点击**创建**。 ## 步骤 2:验证金丝雀发布 @@ -53,20 +43,12 @@ KubeSphere 基于 [Istio](https://istio.io/) 向用户提供部署金丝雀服 1. 访问 Bookinfo 网站,重复刷新浏览器。您会看到 **Book Reviews** 板块以 50% 的比例在 v1 版本和 v2 版本之间切换。 - ![canary](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary.gif) - 2. 金丝雀发布任务创建后会显示在**任务状态**选项卡下。点击该任务查看详情。 - ![canary-release-job](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-job.PNG) - -3. 您可以看到每个版本分别收到一半流量: - - ![canary-release-6](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release_6.png) +3. 您可以看到每个版本分别收到一半流量。 4. 新的部署也已创建。 - ![deployment-list-1](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/deployment-list_1.png) - 5. 
您可以执行以下命令直接获取虚拟服务来识别权重: ```bash @@ -116,35 +98,25 @@ KubeSphere 基于 [Istio](https://istio.io/) 向用户提供部署金丝雀服 请确保将以上命令中的主机名和端口号替换成您自己环境的。 {{}} -2. 在**流量治理**中,您可以看到不同服务之间的通信、依赖关系、运行状态及性能。 +2. 在**流量监控**中,您可以看到不同服务之间的通信、依赖关系、运行状态及性能。 - ![traffic-management](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/traffic-management.png) +3. 点击组件(例如 **reviews**),在右侧可以看到流量监控信息,显示**流量**、**成功率**和**持续时间**的实时数据。 -3. 点击组件(例如 **reviews**),在右侧可以看到流量监控信息,显示 **Traffic**、**Success rate** 和 **Duration** 的实时数据。 - - ![topology](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/topology.png) - -## 步骤 4:查看 Tracing 详情 +## 步骤 4:查看链路追踪详情 KubeSphere 提供基于 [Jaeger](https://www.jaegertracing.io/) 的分布式追踪功能,用来对基于微服务的分布式应用程序进行监控及故障排查。 -1. 在 **Tracing** 选项卡中,可以清楚地看到请求的所有阶段及内部调用,以及每个阶段的调用耗时。 - - ![tracing](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing.png) +1. 在**链路追踪**选项卡中,可以清楚地看到请求的所有阶段及内部调用,以及每个阶段的调用耗时。 2. 点击任意条目,可以深入查看请求的详细信息及该请求被处理的位置(在哪个机器或者容器)。 - ![tracing-kubesphere](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png) - ## 步骤 5:接管所有流量 如果一切运行顺利,则可以将所有流量引入新版本。 -1. 在**灰度发布**中,点击金丝雀发布任务。 +1. 在**任务状态**中,点击金丝雀发布任务。 -2. 在弹出的对话框中,点击 **reviews v2** 右侧的 ,选择**接管所有流量**。这代表 100% 的流量将会被发送到新版本 (v2)。 - - ![take-over-release](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-release.png) +2. 在弹出的对话框中,点击 **reviews v2** 右侧的 ,选择**接管**。这代表 100% 的流量将会被发送到新版本 (v2)。 {{< notice note >}} 如果新版本出现任何问题,可以随时回滚到之前的 v1 版本。 @@ -152,5 +124,4 @@ KubeSphere 提供基于 [Jaeger](https://www.jaegertracing.io/) 的分布式追 3. 
再次访问 Bookinfo,多刷新几次浏览器,您会发现页面只会显示 **reviews v2** 的结果(即带有黑色星标的评级)。 - ![finish-canary-release](/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/finish-canary-release.png) diff --git a/content/zh/docs/project-user-guide/grayscale-release/traffic-mirroring.md b/content/zh/docs/project-user-guide/grayscale-release/traffic-mirroring.md index bb48c8f67..57f8998a9 100644 --- a/content/zh/docs/project-user-guide/grayscale-release/traffic-mirroring.md +++ b/content/zh/docs/project-user-guide/grayscale-release/traffic-mirroring.md @@ -11,35 +11,27 @@ weight: 10540 ## 准备工作 - 您需要启用 [KubeSphere 服务网络](../../../pluggable-components/service-mesh/)。 -- 您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要启用**应用治理**,并有可用的应用,以便为该应用进行流量镜像。本教程以 Bookinfo 为例。有关更多信息,请参阅[部署 Bookinfo 和管理流量](../../../quick-start/deploy-bookinfo-to-k8s/)。 ## 创建流量镜像任务 -1. 以 `project-regular` 用户登录 KubeSphere 并进入项目。前往**灰度发布**页面,在页面右侧点击**流量镜像**右侧的**发布任务**。 +1. 以 `project-regular` 用户登录 KubeSphere 并进入项目。前往**灰度发布**页面,在页面右侧点击**流量镜像**右侧的**创建**。 2. 设置发布任务的名称并点击**下一步**。 -3. 在**灰度组件**选项卡,从下拉列表中选择需要进行流量镜像的应用和对应的服务(本教程以 Bookinfo 应用的 reviews 服务为例),然后点击**下一步**。 +3. 在**服务设置**选项卡,从下拉列表中选择需要进行流量镜像的应用和对应的服务(本教程以 Bookinfo 应用的 reviews 服务为例),然后点击**下一步**。 -4. 在**灰度版本**选项卡,为应用添加另一个版本(例如 `v2`),然后点击**下一步**。 +4. 在**新版本设置**选项卡,为应用添加另一个版本(例如 `kubesphere/examples-bookinfo-reviews-v2:1.16.2`;将 `v1` 改为 `v2`),然后点击**下一步**。 - ![traffic-mirroring-4](/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg) - -5. 在**策略配置**选项卡,点击**创建**。 +5. 在**策略设置**选项卡,点击**创建**。 6. 
新建的流量镜像任务显示在**任务状态**页面。点击该任务查看详情。 - ![traffic-mirroring-task](/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-task.jpg) - 7. 在详情页面,您可以看到流量被镜像至 `v2` 版本,同时折线图中显示实时流量。 - ![traffic-mirroring-6](/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg) - 8. 新建的部署也显示在**工作负载**下的**部署**页面。 - ![new-deployment](/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg) - 9. 您可以执行以下命令查看虚拟服务的 `mirror` 和 `weight` 字段。 ```bash @@ -86,6 +78,4 @@ weight: 10540 ## 下线任务 -您可以点击**任务下线**移除流量镜像任务。此操作不会影响当前的应用版本。 - -![remove-traffic-mirroring](/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg) \ No newline at end of file +您可以点击**删除**移除流量镜像任务。此操作不会影响当前的应用版本。 diff --git a/content/zh/docs/project-user-guide/image-builder/_index.md b/content/zh/docs/project-user-guide/image-builder/_index.md index d10a9e339..d05c53dda 100644 --- a/content/zh/docs/project-user-guide/image-builder/_index.md +++ b/content/zh/docs/project-user-guide/image-builder/_index.md @@ -1,5 +1,5 @@ --- -linkTitle: "Image Builder" +linkTitle: "镜像构建器" weight: 10600 _build: diff --git a/content/zh/docs/project-user-guide/image-builder/binary-to-image.md b/content/zh/docs/project-user-guide/image-builder/binary-to-image.md index 166a50ab5..d8d4011f6 100644 --- a/content/zh/docs/project-user-guide/image-builder/binary-to-image.md +++ b/content/zh/docs/project-user-guide/image-builder/binary-to-image.md @@ -1,7 +1,7 @@ --- title: "Binary to Image:发布制品到 Kubernetes" keywords: "KubeSphere, Kubernetes, Docker, B2I, Binary-to-Image" -description: "如何使用 Binary-to-Image 发布制品到 Kubernetes" +description: "如何使用 Binary-to-Image 发布制品到 Kubernetes。" linkTitle: "Binary to Image:发布制品到 Kubernetes" weight: 10620 --- @@ -20,13 +20,19 @@ Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执 | 
[b2i-war-java11.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java11.war) | [springmvc5](https://github.com/kubesphere/s2i-java-container/tree/master/tomcat/examples/springmvc5) | | [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) | [devops-go-sample](https://github.com/runzexia/devops-go-sample) | | [b2i-jar-java11.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java11.jar) | [ java-maven-example](https://github.com/kubesphere/s2i-java-container/tree/master/java/examples/maven) | -| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-java-sample](https://github.com/kubesphere/devops-java-sample) | +| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) | + +## 视频演示 + + ## 准备工作 - 您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。 - 您需要创建一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。 -- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),请务必邀请该帐户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),请务必邀请该用户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 设置一个 CI 专用节点用于构建镜像。该操作不是必需,但建议开发和生产环境进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/set-ci-node/)。 ## 使用 Binary-to-Image (B2I) 创建服务 @@ -35,91 +41,60 @@ Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执 ![服务构建](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service-build.png) -### 步骤 1:创建 Docker Hub 密钥 +### 步骤 1:创建 Docker Hub 保密字典 -您必须创建 Docker Hub 密钥,以便将通过 B2I 创建的 Docker 镜像推送至 Docker Hub。以 `project-regular` 身份登录 KubeSphere,转到您的项目并创建一个 Docker Hub 
密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。 +您必须创建 Docker Hub 保密字典,以便将通过 B2I 创建的 Docker 镜像推送至 Docker Hub。以 `project-regular` 身份登录 KubeSphere,转到您的项目并创建一个 Docker Hub 保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 ### 步骤 2:创建服务 1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。 - ![创建服务](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/create-service.PNG) - -2. 下拉至**通过制品构建新的服务**,选择 **war**。本教程使用 [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) 项目作为示例并上传 war 制品至 KubeSphere。设置一个名称,例如 `b2i-war-java8`,点击**下一步**。 +2. 下拉至**通过制品构建服务**,选择 **WAR**。本教程使用 [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) 项目作为示例并上传 WAR 制品至 KubeSphere。设置一个名称,例如 `b2i-war-java8`,点击**下一步**。 3. 在**构建设置**页面,请提供以下相应信息,并点击**下一步**。 - ![构建设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-settings.PNG) - **服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。 - **上传制品**:上传 war 制品 ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war))。 + **制品文件**:上传 WAR 制品 ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war))。 **构建环境**:选择 **kubesphere/tomcat85-java8-centos7:v2.1.0**。 **镜像名称**:输入 `/` 或 `/` 作为镜像名称。 - **tag**:镜像标签,请输入 `latest`。 + **镜像标签**:镜像标签,请输入 `latest`。 - **Target image repository**:镜像会推送至 Docker Hub,故请选择 Docker Hub 密钥。 + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 -4. 在**容器设置**页面,下拉至**服务设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-port`),**容器端口**和**服务端口**都输入 `8080`。点击**下一步**继续。 - - ![容器设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/container-settings.PNG) +4. 
在**容器组设置**页面,下拉至**端口设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-port`),**容器端口**和**服务端口**都输入 `8080`。点击**下一步**继续。 {{< notice note >}} - 有关如何在**容器设置**页面设置其他参数的更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。 + 有关如何在**容器设置**页面设置其他参数的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 {{}} -5. 在**挂载存储**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。 +5. 在**存储卷设置**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。 -6. 在**高级设置**页面,选中**外网访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 - - ![高级设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/advanced-settings.PNG) - -7. 点击左侧导航栏的**构建镜像**,您可以看到正在构建示例镜像。![构建中](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building.PNG) +6. 在**高级设置**页面,选中**外部访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 +7. 点击左侧导航栏的**镜像构建器**,您可以看到正在构建示例镜像。 ### 步骤 3:查看结果 -1. 稍等片刻,您可以看到镜像状态变为**成功**。 - - ![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/successful.PNG) +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 - ![查看日志](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-logs.PNG) - -3. 回到上一层页面,您可以看到该镜像相应的任务、部署和服务都已成功创建。 - - #### 服务 - - ![service](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service.PNG) - - #### 部署 - - ![deployment](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/deployment.PNG) - - #### 任务 - - ![job](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job.PNG) +3. 回到**服务**、**部署**和**任务**页面,您可以看到该镜像相应的服务、部署和任务都已成功创建。 4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 - ![Docker 镜像](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image.PNG) - ### 步骤 4:访问 B2I 服务 1. 
在**服务**页面,请点击 B2I 服务前往其详情页面,您可以查看暴露的端口号。 - ![端口暴露](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/exposed-port.PNG) - 2. 通过 `http://://` 访问服务。 - ![访问服务](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/access-service.PNG) - {{< notice note >}} 取决于您的部署环境,您可能需要在安全组中放行端口并配置端口转发规则。 @@ -128,13 +103,13 @@ Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执 ## 使用 Image Builder 构建镜像 -前述示例通过创建服务来实现整个 B2I 工作流。此外,您也可以直接使用 Image Builder 基于制品构建镜像,但这个方式不会将镜像发布至 Kubernetes。 +前述示例通过创建服务来实现整个 B2I 工作流。此外,您也可以直接使用镜像构建器基于制品构建镜像,但这个方式不会将镜像发布至 Kubernetes。 ![build-binary](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-binary.png) {{< notice note >}} -请确保您已经创建了 Docker Hub 密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。 +请确保您已经创建了 Docker Hub 保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 {{}} @@ -142,47 +117,32 @@ Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执 1. 以 `project-regular` 身份登录 KubeSphere,转到您的项目。 -2. 在左侧导航栏中选择**构建镜像**,然后点击**创建**。 +2. 在左侧导航栏中选择**镜像构建器**,然后点击**创建**。 - ![image-builder](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-builder.PNG) - -3. 在弹出对话框中,选择 **binary** 并点击**下一步**。 - - ![upload-artifact](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/upload-artifact.PNG) +3. 在弹出的对话框中,选择 **二进制** 并点击**下一步**。 4. 在**构建设置**页面,请提供以下相应信息,然后点击**创建**。 - ![buidling-settings-2](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-settings-2.PNG) - **上传制品**:下载 [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) 并上传至 KubeSphere。 **构建环境**:选择 **kubesphere/s2i-binary:v2.1.0**。 **镜像名称**:自定义镜像名称。 - **tag**:镜像标签,请输入 `latest`。 + **镜像标签**:镜像标签,请输入 `latest`。 - **Target image repository**:镜像会推送至 Docker Hub,故请选择 Docker Hub 密钥。 + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 -5. 
在**构建镜像**页面,您可以看到正在构建镜像。 - - ![构建状态](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-status.PNG) +5. 在**镜像构建器**页面,您可以看到正在构建镜像。 ### 步骤 2:检查结果 -1. 稍等片刻,您可以看到镜像状态变为**成功**。 +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 - ![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-success.PNG) +2. 点击该镜像构建器前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 -2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 - - ![查看日志](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-log.PNG) - -3. 回到上一层页面,您可以看到该镜像相应的任务已成功创建。 - - ![Job 已创建](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job-created.PNG) +3. 前往**任务**页面,您可以看到该镜像相应的任务已成功创建。 4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 - ![Docker 镜像已推送](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image-pushed.PNG) diff --git a/content/zh/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md b/content/zh/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md index 12e6dad86..9e6aea59f 100644 --- a/content/zh/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md +++ b/content/zh/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks.md @@ -7,14 +7,14 @@ weight: 10650 --- -KubeSphere 提供 Source-to-Image (S2I) 和 Binary-to-Image (B2I) 功能,以自动化镜像构建、推送和应用程序部署。在 KubeSphere v3.1 中,您可以配置 S2I 和 B2I Webhook,以便当代码仓库中存在任何相关活动时,自动触发镜像构建器。 +KubeSphere 提供 Source-to-Image (S2I) 和 Binary-to-Image (B2I) 功能,以自动化镜像构建、推送和应用程序部署。在 KubeSphere v3.1.x 以及后续版本中,您可以配置 S2I 和 B2I Webhook,以便当代码仓库中存在任何相关活动时,自动触发镜像构建器。 本教程演示如何配置 S2I 和 B2I webhooks。 ## 准备工作 - 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/),该系统已集成 S2I。 -- 您需要创建一个创建企业空间,一个项目 (`demo-project`) 和一个帐户 (`project-regular`)。`project-regular` 需要被邀请到项目中,并赋予 `operator` 角色。有关详细信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 
+- 您需要创建一个企业空间,一个项目 (`demo-project`) 和一个用户 (`project-regular`)。`project-regular` 需要被邀请到项目中,并赋予 `operator` 角色。有关详细信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/#step-1-create-an-account)。 - 您需要创建一个 S2I 镜像构建器和 B2I 镜像构建器。有关更多信息,请参见 [Source to Image:无需 Dockerfile 发布应用](../source-to-image/)和[Binary to Image:发布制品到 Kubernetes](../binary-to-image/)。 ## 配置 S2I Webhook @@ -23,17 +23,11 @@ KubeSphere 提供 Source-to-Image (S2I) 和 Binary-to-Image (B2I) 功能,以 1. 以 `admin` 身份登录 KubeSphere Web 控制台。在左上角点击**平台管理**,然后选择**集群管理**。 -2. 选择在**应用负载**下面的**服务**,从下拉框中选择 **kubesphere-devops-system**,然后点击 **s2ioperator-trigger-service** 进入详情页面。 - ![s2i-trigger-service](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-service.png) +2. 前往**应用负载**下的**服务**,从下拉框中选择 **kubesphere-devops-system**,然后点击 **s2ioperator-trigger-service** 进入详情页面。 +3. 点击**更多操作**,选择**编辑外部访问**。 -3. 点击**更多操作**,选择**编辑外网访问**。 - ![edit-trigger-service](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/edit-trigger-service.png) -4. 在出现的窗口中,从**访问方法**的下拉菜单中选择 **NodePort**,然后点击**确定**。 - ![select-nodeport](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/select-nodeport.png) +4. 在弹出的对话框中,从**访问方式**的下拉菜单中选择 **NodePort**,然后点击**确定**。 {{< notice note >}} @@ -41,67 +35,48 @@ KubeSphere 提供 Source-to-Image (S2I) 和 Binary-to-Image (B2I) 功能,以 {{}} -5. 在详情界面可以查看 **Node Port**。Node Port 会包括在 S2I webhook URL 中。 +5. 在详情界面可以查看 **NodePort**。S2I webhook URL 中将包含此 NodePort。 - ![s2i-nodeport](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-nodeport.png) +### 步骤 2:配置 S2I webhook -### 步骤 2:配置一个 S2I webhook - -1. 登出 KubeSphere 并以 `project-regular` 帐户登回。然后转到 `demo-project`。 +1. 登出 KubeSphere 并以 `project-regular` 用户登回。然后转到 `demo-project`。 2. 在**镜像构建器**中,点击 S2I 镜像构建器,进入详情页面。 - ![click-s2i](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-s2i.png) +3. 
您可以在**远程触发器**中看到自动生成的链接。复制 `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`,S2I webhook URL 中将包含这个链接。 -3. 您可以在**远程触发链接**中看到自动生成的链接。复制 `/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`,因为这个链接将包含在 S2I webhook URL 中。 +4. 登录您的 GitHub 帐户,转到用于 S2I 镜像构建器的源代码仓库。转到 **Settings** 下的 **Webhooks**,然后点击 **Add webhook**。 - ![s2i-trigger-link](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-link.png) - -4. 登录您的 GitHub 帐户,转到用于 S2I 镜像构建器的源代码仓库。转到 **Settings** 下的 **Webhooks**,然后点击 **Add webhook**。 - - ![click-add-webhook](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-add-webhook.png) - -5. 在 **Payload URL**,输入 `http://:/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`。您可以基于您的需要选择触发事件,然后点击 **Add webhook**。本教程出于演示目的将会选择 **Just the push event**。 - - ![add-payload-url](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/add-payload-url.png) +5. 在 **Payload URL**,输入 `http://<NodeIP>:<NodePort>/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/`。您可以按需选择触发事件,然后点击 **Add webhook**。本教程出于演示目的,选择 **Just the push event**。 {{< notice note >}} - `` 是您自己的 IP 地址,`` 是您在第一步中获得的 NodePort。`/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` 来自 S2I 的远程触发链接。确保您用的是您自己的 IP 地址、Service NodePort 和 S2I 远程触发链接。您可能还需要配置必要的端口转发规则,并根据 Kubernetes 群集的部署位置,在安全组中打开端口。 + `<NodeIP>` 是您自己的 IP 地址,`<NodePort>` 是您在第一步中获得的 NodePort。`/s2itrigger/v1alpha1/general/namespaces/demo-project/s2ibuilders/felixnoo-s2i-sample-latest-zhd/` 来自 S2I 的远程触发器链接。确保您用的是您自己的 IP 地址、Service NodePort 和 S2I 远程触发器链接。根据您 Kubernetes 集群的部署位置,您可能还需要配置必要的端口转发规则并在安全组中打开端口。 {{}} 6. 添加 webhook 后,您可以点击 webhook 查看 **Recent Deliveries** 中的交付详细信息。如果有效负载 URL 有效,您可以看到绿色的勾号。 - ![webhook-delivery](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/webhook-delivery.png) -7. 
完成上述所有操作后,如果源代码仓库中存在推送事件,则会自动触发 S2I Image Builder。 - - ![s2i-auto-build](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-auto-build.png) +7. 完成上述所有操作后,如果源代码仓库中存在推送事件,则会自动触发 S2I 镜像构建器。 ## 配置 B2I Webhook 您可以按照相同的步骤配置 B2I webhook。 -1. 暴露 S2I 触发服务。 +1. 暴露 S2I 触发器服务。 -2. 在 B2I 镜像构建器的详细信息页面中查看 **Remote Trigger Link**。 - - ![b2i-trigger-link](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-trigger-link.png) +2. 在 B2I 镜像构建器的详细信息页面中查看**远程触发器**。 3. 在源代码仓库中添加有效负载 URL。B2I 有效负载 URL 格式与 S2I 有效负载 URL 格式相同。 - ![b2i-payload-url](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-payload-url.png) - {{< notice note >}} - 根据 Kubernetes 群集的部署位置,您可能需要配置必要的端口转发规则并在安全组中打开端口。 + 根据您 Kubernetes 集群的部署位置,您可能需要配置必要的端口转发规则并在安全组中打开端口。 {{}} 4. 如果源代码仓库发生相关事件,B2I 镜像构建器将自动触发。 - ![b2i-auto-build](/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-auto-build.png) diff --git a/content/zh/docs/project-user-guide/image-builder/s2i-templates.md b/content/zh/docs/project-user-guide/image-builder/s2i-templates.md index 7fd7d13f8..cbcc9874f 100644 --- a/content/zh/docs/project-user-guide/image-builder/s2i-templates.md +++ b/content/zh/docs/project-user-guide/image-builder/s2i-templates.md @@ -1,56 +1,57 @@ --- -title: "Customize S2I Templates" +title: "自定义 S2I 模板" keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image' -description: 'Customize S2I templates and understand different template parameters.' -linkTitle: "Customize S2I Templates" +description: '学习如何自定义 S2I 模板,并理解不同的模板参数。' +linkTitle: "自定义 S2I 模板" weight: 10640 + --- -Once you have understood the workflow and logic of Source-to-Image (S2I), you can customize Image Builder templates (i.e. S2I/B2I templates) based on your projects to extend S2I capabilities. KubeSphere provides several common Image Builder templates, such as [Python](https://github.com/kubesphere/s2i-python-container/) and [Java](https://github.com/kubesphere/s2i-java-container/). 
+当您了解了 Source-to-Image (S2I) 的工作流和逻辑,就可以根据您的项目自定义镜像构建器模板(即 S2I / B2I 模板),以扩展 S2I 功能。KubeSphere 提供了几种常见的镜像构建器模板,如 [Python ](https://github.com/kubesphere/s2i-python-container/)和 [Java](https://github.com/kubesphere/s2i-java-container/)。 -This tutorial demonstrates how to create an Image Builder that contains an Nginx service. If you need to use Runtime Image in your project, refer to [this document](https://github.com/kubesphere/s2irun/blob/master/docs/runtime_image.md) for more information about how to create a Runtime Image. +本教程演示如何创建包含 Nginx 服务的镜像构建器。如果需要在项目中使用运行时镜像,请参阅[本文档](https://github.com/kubesphere/s2irun/blob/master/docs/runtime_image.md)以了解有关如何创建运行时镜像的更多信息。 -## Prerequisites +## 准备工作 -S2I template customization can be divided into two parts. +S2I 模板自定义分成两部分。 -- Part 1: S2I Image Builder customization - - assemble (required): the `assemble` script that builds application artifacts from source code. - - run (required): the `run` script that executes an application. - - save-artifacts (optional): the `save-artifacts` script that manages all dependencies in an incremental building process. - - usage (optional): the script that provides instructions. - - test (optional): the script for testing. -- Part 2: definition of S2I template +- 第一部分:S2I 自定义镜像构建 + - assemble(必需):从源代码构建应用程序制品的脚本 `assemble`。 + - run(必需):用于运行应用程序的脚本。 + - save-artifacts(可选):管理增量构建过程中的所有依赖。 + - usage(可选):提供说明的脚本。 + - test (可选):用于测试的脚本。 +- 第二部分:S2I 模板定义 -You need to have the required elements for S2I template customization ready in advance. +您需要提前准备好 S2I 模板定制所需的元素。 {{< notice note >}} -The Image Builder is compatible with that of OpenShift, and you can reuse it in KubeSphere. For more information about S2I Image Builder, refer to [S2IRun](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-builder-image-requirements). 
+与 OpenShift 的镜像构建器兼容,您可以在 KubeSphere 中重用它。有关 S2I 镜像构建器的更多信息,请参见 [S2IRun](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-builder-image-requirements)。 {{}} -## Create an Image Builder +## 创建镜像构建器 -### Step 1: Prepare S2I directory +### 步骤 1:准备 S2I 目录 -1. [S2I command line tool](https://github.com/openshift/source-to-image/releases) provides an easy-to-use command to initialize a base directory structure required by the Builder. Run the following commands to install S2I CLI. +1. [S2I 命令行工具](https://github.com/openshift/source-to-image/releases)提供了一个易于使用的命令来初始化构建器所需的基本目录结构。运行以下命令以安装 S2I CLI。 ```bash - $ wget https://github.com/openshift/source-to-image/releases/download/v1.1.14/source-to-image-v1.1.14-874754de-linux-386.tar.gz + $ wget https://github.com/openshift/source-to-image/releases/download/v1.1.14/source-to-image-v1.1.14-874754de-linux-386.tar.gz $ tar -xvf source-to-image-v1.1.14-874754de-linux-386.tar.gz $ ls s2i source-to-image-v1.1.14-874754de-linux-386.tar.gz sti $ cp s2i /usr/local/bin ``` -2. This tutorial uses `nginx-centos7` as the name of the Image Builder. Run the `s2i create` command to initialize the base directory structure. +2. 本教程使用 `nginx-centos7` 作为镜像构建器的名称。运行 `s2i create` 命令初始化基本目录结构。 ```bash s2i create nginx-centos7 s2i-builder-docs ``` -3. The directory structure is initialized as follows. +3. 目录结构初始化如下。 ``` s2i-builder-docs/ @@ -65,11 +66,11 @@ The Image Builder is compatible with that of OpenShift, and you can reuse it in usage - a script that prints the usage of the Image Builder ``` -### Step 2: Modify the Dockerfile +### 步骤 2:修改 Dockerfile -A Dockerfile installs all of the necessary tools and libraries that are needed to build and run an application. This file will also copy the S2I scripts into the output image. +Dockerfile 安装用于构建和运行应用程序的所有必要工具和库。Dockerfile 还将 S2I 脚本复制到输出镜像中。 -Modify the Dockerfile as follows to define the Image Builder. 
+按如下所示修改 Dockerfile 以定义镜像构建器。 #### Dockerfile @@ -119,13 +120,13 @@ CMD ["/usr/libexec/s2i/usage"] {{< notice note >}} -S2I scripts will use the flags defined in the Dockerfile as parameters. If you need to use a base image different from those provided by KubeSphere, refer to [S2I Scripts](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-scripts). +S2I 脚本将使用 Dockerfile 中定义的标志作为参数。如果您需要使用与 KubeSphere 提供的基础镜像不同的基础镜像,请参见 [S2I Scripts](https://github.com/kubesphere/s2irun/blob/master/docs/builder_image.md#s2i-scripts)。 {{}} -### Step 3: Create S2I Scripts +### 步骤 3:创建 S2I 脚本 -1. Create an `assemble` script as follows to copy configuration file and static contents to the target container. +1. 创建一个 `assemble` 脚本,如下所示,将配置文件和静态内容复制到目标容器中。 ```bash #!/bin/bash -e @@ -146,11 +147,11 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n {{< notice note >}} - By default, `s2i build` places the application source code in `/tmp/src`. The above commands copy the application source code to the working directory `/opt/app-root/src` defined by `kubespheredev/s2i-base-centos7:1`. + 默认情况下,`s2i build` 将应用程序源代码放在 `/tmp/src`。上述命令将应用程序源代码复制到由 `kubespheredev/s2i-base-centos7:1` 定义的工作目录 `/opt/app-root/src`。 {{}} -2. Create a `run` script as follows. In this tutorial, it only starts the `nginx` server. +2. 创建一个 `run` 脚本,如下所示。在本教程中,它只启动 `nginx` 服务器。 ```bash #!/bin/bash -e @@ -159,12 +160,10 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n ``` {{< notice note >}} - - This tutorial uses the `exec` command to execute the host process of `nginx` server to let all signals sent from `docker` be received by `nginx` while `nginx` can use the standard input and output streams of the container. Besides, the `save-artifacts` script allows a new build to reuse content from a previous version of application image. 
The `save-artifacts` script can be deleted because this tutorial does not implement incremental building. - + 本教程使用 `exec` 命令执行 `nginx` 服务器主机进程,让 `nginx` 接收从 `docker` 发送的所有信号,而 `nginx` 可以使用容器的标准输入和输出流。此外,`save-artifacts` 脚本允许新的构建重用应用程序早期版本镜像内容。`save-artifacts` 脚本可以删除,因为本教程不实现增量构建。 {{}} -3. Create a `usage` script as follows. It prints out instructions on how to use the image. +3. 创建一个 `usage` 脚本,如下所示,它会打印出镜像使用说明。 ```bash #!/bin/bash -e @@ -179,9 +178,9 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n EOF ``` -### Step 4: Build and run +### 步骤 4:构建与运行 -1. Modify the image name in `Makefile`. +1. 修改 Makefile 中的镜像名称 ```bash IMAGE_NAME = kubespheredev/nginx-centos7-s2ibuilder-sample @@ -198,7 +197,7 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n IMAGE_NAME=$(IMAGE_NAME)-candidate test/run ``` -2. Run the `make build` command to build the Image Builder for Nginx. +2. 运行 `make build` 命令为 NGINX 构建镜像构建器。 ```bash $ make build @@ -230,7 +229,7 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n Successfully tagged kubespheredev/nginx-centos7-s2ibuilder-sample:latest ``` -3. With the Image Builder created, run the following command to create an application image. +3. 在创建镜像构建器后,运行以下命令创建应用程序镜像。 ```bash $ s2i build ./test/test-app kubespheredev/nginx-centos7-s2ibuilder-sample:latest sample-app @@ -239,24 +238,20 @@ S2I scripts will use the flags defined in the Dockerfile as parameters. If you n ``` {{< notice note >}} - - Following the logic defined in the `assemble` script, S2I creates an application image using the Image Builder as a base and injecting the source code from the `test/test-app` directory. - + 按照 `assemble` 脚本中定义的逻辑,S2I 使用镜像构建器作为基础创建应用程序镜像,并从 `test/test-app` 目录注入源代码。 {{}} -4. Run the following command to run the application image. +4. 
运行以下命令以运行应用程序镜像。 ```bash docker run -p 8080:8080 sample-app ``` - You can access the Nginx application at `http://localhost:8080`. + 您可以在此位置访问 Nginx 应用程序:`http://localhost:8080`。 - ![access-nginx](/images/docs/project-user-guide/image-builder/s2i-templates/access-nginx.png) +### 步骤 5:推送镜像与创建 S2I 模板 -### Step 5: Push image and create S2I template - -Once you finish testing the S2I Image Builder locally, you can push the image to your custom image repository. You also need to create a YAML file as the S2I Builder template as follows. +在本地完成 S2I 镜像构建器测试后,可以将镜像推送到自定义镜像仓库。您还需要创建一个 YAML 文件作为 S2I 构建器模板,如下所示。 #### s2ibuildertemplate.yaml @@ -272,57 +267,55 @@ spec: containerInfo: - builderImage: kubespheredev/nginx-centos7-s2ibuilder-sample codeFramework: nginx # type of code framework - defaultBaseImage: kubespheredev/nginx-centos7-s2ibuilder-sample # default Image Builder (can be replaced by customized image) + defaultBaseImage: kubespheredev/nginx-centos7-s2ibuilder-sample # default Image Builder (can be replaced by a customized image) version: 0.0.1 # Builder template version - description: "This is a S2I builder template for Nginx builds whose result can be run directly without any further application server.." # Builder template description + description: "This is an S2I builder template for NGINX builds whose result can be run directly without any further application server." # Builder template description ``` -### Step 6: Use S2I template on KubeSphere +### 步骤 6:在 KubeSphere 使用 S2I 模板 -1. Run the following command to submit the S2I template created above to KubeSphere. +1. 运行以下命令将上面创建的 S2I 模板提交至 KubeSphere。 - ```bash + ```bash $ kubectl apply -f s2ibuildertemplate.yaml s2ibuildertemplate.devops.kubesphere.io/nginx created - ``` + ``` -2. You can find the customized S2I template available when you create a S2I build on KubeSphere. +2. 
在 KubeSphere 上创建 S2I 构建时,可以在**构建环境**中找到自定义 S2I 模板。 - ![template-available](/images/docs/project-user-guide/image-builder/s2i-templates/template-available.png) +## S2I 模板参数定义 -## S2I Template Parameters Definition +有关 S2I 模板标签作为参数传递给前端分类的详细说明,请参见下表 -Refer to the following detailed descriptions of S2I template labels passed as parameters to frontend classifications. - -| Label Name | Option | Definition | +| 标签名称 | 选项 | 定义 | | ------------------------------------- | -------------------- | ------------------------------------------------------------ | -| builder-type.kubesphere.io/s2i: "s2i" | "s2i" | The type of this template is S2I, which builds images based on application source code. | -| builder-type.kubesphere.io/b2i | "b2i" | The type of this template is B2I, which builds images based on binary files or other artifacts. | -| binary-type.kubesphere.io | "jar","war","binary" | This type is complementary to the type of B2I and will be required when B2I is selected. For example, select the type of "jar" when a JAR package is provided. In KubeSphere v2.1.1 and later, it is also allowed to customize B2I template. | +| builder-type.kubesphere.io/s2i: "s2i" | "s2i" | 模板类型为 S2I,基于应用程序源代码构建镜像。 | +| builder-type.kubesphere.io/b2i | "b2i" | 模板类型为 B2I,基于二进制文件或其他制品构建镜像。 | +| binary-type.kubesphere.io | "jar","war","binary" | 该类型为 B2I 类型的补充,在选择 B2I 类型时需要。例如,当提供 Jar 包时,选择 "jar" 类型。在 KubeSphere v2.1.1 及更高版本,允许自定义 B2I 模板。 | -Refer to the following detailed descriptions of S2I template parameters. The required parameters are marked with an asterisk. +参见以下 S2I 模板参数的详细说明。必需参数用星号标记。 -| Parameter | Type | Definition | +| 参数 | 类型 | 定义 | | ------------------------------------------ | -------- | ------------------------------------------------------------ | -| *containerInfo | []struct | The information about Image Builder. | -| *containerInfo.builderImage | string | S2I Image Builder, such as kubesphere/java-8-centos7:v2.1.0. 
| -| containerInfo.runtimeImage | string | S2I Runtime Image, such as kubesphere/java-8-runtime:v2.1.0. | -| containerInfo.buildVolumes | []string | The information about mounted volume. The format is "volume_name:mount_path", such as ["s2i_java_cache:/tmp/artifacts","test_cache:test_path"]. | -| containerInfo.runtimeArtifacts | []struct | The list of original path and target path for the output artifact; only add it for phased building. | -| containerInfo.runtimeArtifacts.source | string | The original path of artifact in Image Builder. | -| containerInfo.runtimeArtifacts.destination | string | The target path of artifact in Runtime Image. | -| containerInfo.runtimeArtifacts.keep | bool | Whether to keep the data in the output image. | -| *defaultBaseImage | string | The default Image Builder. | -| *codeFramework | string | The code framework type, such as Java, Ruby. | -| environment | []struct | The list of environment variables in the building process. | -| environment.key | string | The name of environment variables. | -| environment.type | string | The type of environment variable keys. | -| environment.description | string | The description of environment variables. | -| environment.optValues | []string | The list of parameters for environment variables. | -| environment.required | bool | Whether the environment variable is required to be set. | -| environment.defaultValue | string | The default value of environment variables. | -| environment.value | string | The value of environment variables. | -| iconPath | string | The application name. | -| version | string | The version of S2I template. | -| description | string | The description of the template's functions and usage. | +| *containerInfo | []struct | 关于镜像构建器的信息。 | +| *containerInfo.builderImage | string | S2I 镜像构建器,如:kubesphere/java-8-centos7:v2.1.0. | +| containerInfo.runtimeImage | string | S2I 运行时镜像,如:kubesphere/java-8-runtime:v2.1.0. 
| +| containerInfo.buildVolumes | []string | 关于挂载卷的信息。格式为 "volume_name:mount_path", 如:"s2i_java_cache:/tmp/artifacts","test_cache:test_path"]。 | +| containerInfo.runtimeArtifacts | []struct | 输出制品的原始路径和目标路径;仅在分阶段构建中添加。 | +| containerInfo.runtimeArtifacts.source | string | 制品在镜像构建器的原始路径。 | +| containerInfo.runtimeArtifacts.destination | string | 运行时镜像中制品的目标路径。 | +| containerInfo.runtimeArtifacts.keep | bool | 是否将数据保留在输出镜像中。 | +| *defaultBaseImage | string | 默认镜像构建器。 | +| *codeFramework | string | 代码框架类型,如:Java、Ruby。 | +| environment | []struct | 构建过程中的环境变量列表。 | +| environment.key | string | 环境变量的名称。 | +| environment.type | string | 环境变量键的类型。 | +| environment.description | string | 环境变量的描述。 | +| environment.optValues | []string | 环境变量的参数列表。 | +| environment.required | bool | 是否需要设置环境变量。 | +| environment.defaultValue | string | 环境变量的默认值。 | +| environment.value | string | 环境变量的值。 | +| iconPath | string | 应用名称。 | +| version | string | S2I 模板版本。 | +| description | string | 模板功能和用法的说明。 | diff --git a/content/zh/docs/project-user-guide/image-builder/source-to-image.md b/content/zh/docs/project-user-guide/image-builder/source-to-image.md index f7f277586..9b2ba1d68 100644 --- a/content/zh/docs/project-user-guide/image-builder/source-to-image.md +++ b/content/zh/docs/project-user-guide/image-builder/source-to-image.md @@ -12,28 +12,32 @@ Source-to-Image (S2I) 是一个工具箱和工作流,用于从源代码构建 ![构建流程](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-process.png) +## 视频演示 + + + ## 准备工作 - 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/),该系统已集成 S2I。 - 您需要创建一个 [GitHub](https://github.com/) 帐户和一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。本教程使用 Github 仓库提供源代码,用于构建镜像并推送至 Docker Hub。 -- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),请务必邀请该帐户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户 (`project-regular`),请务必邀请该用户至项目中并赋予 `operator` 
角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 设置一个 CI 专用节点用于构建镜像。该操作不是必需,但建议开发和生产环境进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/set-ci-node/)。 ## 使用 Source-to-Image (S2I) ### 步骤 1:Fork 示例仓库 -登录 GitHub 并 Fork GitHub 仓库 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 至您的 GitHub 个人帐户。 +登录 GitHub 并 Fork GitHub 仓库 [devops-maven-sample](https://github.com/kubesphere/devops-maven-sample) 至您的 GitHub 个人帐户。 -![Fork 仓库](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/fork-repository.PNG) +### 步骤 2:创建保密字典 -### 步骤 2:创建密钥 (Secret) - -以 `project-regular` 身份登录 KubeSphere 控制台,转到您的项目,分别为 Docker Hub 和 GitHub 创建密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。 +以 `project-regular` 身份登录 KubeSphere 控制台,转到您的项目,分别为 Docker Hub 和 GitHub 创建保密字典。有关更多信息,请参见[创建常用保密字典](../../../project-user-guide/configuration/secrets/#创建常用保密字典)。 {{< notice note >}} -如果您 Fork 的是公开仓库,则不需要创建 GitHub 密钥。 +如果您 Fork 的是公开仓库,则不需要创建 GitHub 保密字典。 {{}} @@ -41,11 +45,7 @@ Source-to-Image (S2I) 是一个工具箱和工作流,用于从源代码构建 1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。 - ![创建服务](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-service.PNG) - -2. 选中**通过代码构建新的服务**下的 **Java**,将其命名为 `s2i-demo` 并点击**下一步**。 - - ![选择语言类型](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/select-lang-type.PNG) +2. 选中**通过代码创建服务**下的 **Java**,将其命名为 `s2i-demo` 并点击**下一步**。 {{< notice note >}} @@ -55,89 +55,57 @@ Source-to-Image (S2I) 是一个工具箱和工作流,用于从源代码构建 3. 
在**构建设置**页面,请提供以下相应信息,并点击**下一步**。 - ![构建设置](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build_settings.png) - **服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。 **构建环境**:选择 **kubesphere/java-8-centos7:v2.1.0**。 - **代码地址**:源代码仓库地址(目前支持 Git)。您可以指定代码分支和在源代码终端的相对路径。URL 支持 HTTP 和 HTTPS。在该字段粘贴已 Fork 仓库的 URL(您自己仓库的地址)。 + **代码仓库 URL**:源代码仓库地址(目前支持 Git)。您可以指定代码分支和在源代码终端的相对路径。URL 支持 HTTP 和 HTTPS。在该字段粘贴已 Fork 仓库的 URL(您自己仓库的地址)。 - ![复制仓库代码](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/copy-repo-code.PNG) + **代码仓库分支**:分支用于构建镜像。本教程中在此输入 `master`。您可以输入 `dependency` 进行缓存测试。 - **分支**:分支用于构建镜像。本教程中在此输入 `master`。您可以输入 `dependency` 进行缓存测试。 - - **密钥**:您不需要为公共仓库提供密钥。如果您想使用私有仓库,请选择 GitHub 密钥。 + **代码仓库密钥**:您不需要为公共仓库提供保密字典。如果您想使用私有仓库,请选择 GitHub 保密字典。 **镜像名称**:自定义镜像名称。本教程会向 Docker Hub 推送镜像,故请输入 `dockerhub_username/s2i-sample`。`dockerhub_username` 是您的 Docker ID,请确保该 ID 有权限推送和拉取镜像。 - **tag**:镜像标签,请输入 `latest`。 + **镜像标签**:镜像标签,请输入 `latest`。 - **Target image repository**:镜像会推送至 Docker Hub,故请选择 Docker Hub 密钥。 + **目标镜像仓库**:镜像会推送至 Docker Hub,故请选择 Docker Hub 保密字典。 **高级设置**:您可以定义代码相对路径。该字段请使用默认的 `/`。 -4. 在**容器设置**页面,下拉至**服务设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-1`),**容器端口**和**服务端口**都输入 `8080`。 +4. 在**容器组设置**页面,下拉至**端口设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-1`),**容器端口**和**服务端口**都输入 `8080`。 - ![服务设置](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-settings.PNG) - -5. 下拉至**健康检查器**并选中,填写以下参数设置就绪探针。探针设置完成后点击 **√**,然后点击**下一步**继续。 - - ![健康检查器](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/health-checker.PNG) +5. 
下拉至**健康检查**并选中,填写以下参数设置**就绪检查**。探针设置完成后点击 **√**,然后点击**下一步**继续。 **HTTP 请求**:选择 **HTTP** 作为协议,输入 `/` 作为路径(本教程中的根路径),输入 `8080` 作为暴露端口。 - **初始延迟**:容器启动后,存活探针启动之前等待的秒数。本字段输入 `30`。 + **初始延迟(s)**:容器启动后,存活探针启动之前等待的秒数。本字段输入 `30`。 - **超时时间**:探针超时的秒数。本字段输入 `10`。 + **超时时间(s)**:探针超时的秒数。本字段输入 `10`。 - 其他字段请直接使用默认值。有关如何在**容器设置**页面配置探针和设置其他参数的更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。 + 其他字段请直接使用默认值。有关如何在**容器设置**页面配置探针和设置其他参数的更多信息,请参见[容器组设置](../../../project-user-guide/application-workloads/container-image-settings/)。 -6. 在**挂载存储**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。点击**下一步**继续。 +6. 在**存储卷设置**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。点击**下一步**继续。 -7. 在**高级设置**页面,选中**外网访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 +7. 在**高级设置**页面,选中**外部访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。 - ![创建完成](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-finish.PNG) - -8. 点击左侧导航栏的**构建镜像**,您可以看到正在构建示例镜像。 - - ![构建中](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/building.PNG) +8. 点击左侧导航栏的**镜像构建器**,您可以看到正在构建示例镜像。 ### 步骤 4:查看结果 -1. 稍等片刻,您可以看到镜像状态变为**成功**。 +1. 稍等片刻,您可以看到镜像构建器状态变为**成功**。 - ![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/successful-result.PNG) +2. 点击该镜像构建器前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 -2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的 查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`。 - - ![构建日志](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-log.PNG) - -3. 回到上一层页面,您可以看到该镜像相应的任务、部署和服务都已成功创建。 - - #### 服务 - - ![service](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service.PNG) - - #### 部署 - - ![deployment](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/deployment.PNG) - - #### 任务 - - ![job](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/job.PNG) +3. 
回到**服务**、**部署**和**任务**页面,您可以看到该镜像相应的服务、部署和任务都已成功创建。 4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。 - ![Docker 镜像](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/docker-image.PNG) - ### 步骤 5:访问 S2I 服务 1. 在**服务**页面,请点击 S2I 服务前往其详情页面。 - ![Service 详情](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-detail.PNG) - -2. 要访问该服务,您可以执行 `curl` 命令使用 Endpoint 或者访问 `:`。例如: +2. 要访问该服务,您可以执行 `curl` 命令使用 Endpoint 或者访问 `:`。例如: ```bash $ curl 10.10.131.44:8080 diff --git a/content/zh/docs/project-user-guide/storage/volume-snapshots.md b/content/zh/docs/project-user-guide/storage/volume-snapshots.md index 4e6d65e37..184704ea6 100644 --- a/content/zh/docs/project-user-guide/storage/volume-snapshots.md +++ b/content/zh/docs/project-user-guide/storage/volume-snapshots.md @@ -12,7 +12,7 @@ weight: 10320 ## 准备工作 -- 您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 您需要确保 Kubernetes 版本为 1.17 或更新版本。 @@ -25,7 +25,7 @@ weight: 10320 2. 在存储卷详情页面,点击**更多操作**并从下拉菜单中选择**创建快照**。 -3. 在出现的对话框中,设置快照的名称,然后点击**确定**完成创建。快照的名称将作为快照的唯一标识符。 +3. 在弹出的对话框中,设置快照的名称并选择快照类型,然后点击**确定**完成创建。快照的名称将作为快照的唯一标识符。 4. 新建的快照显示在**存储卷快照**列表中。 @@ -38,8 +38,6 @@ weight: 10320 1. 以 `project-regular` 用户登录 KubeSphere Web 控制台,进入快照详情页面,然后点击**应用**来使用快照。其他步骤与直接创建存储卷基本相同。 - ![apply-volume](/images/docs/zh-cn/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg) - 2. 
在弹出的对话框中设置存储卷的名称,然后点击**下一步**。 {{< notice note >}} diff --git a/content/zh/docs/project-user-guide/storage/volumes.md b/content/zh/docs/project-user-guide/storage/volumes.md index 467e50b8a..3fc3ecf8c 100644 --- a/content/zh/docs/project-user-guide/storage/volumes.md +++ b/content/zh/docs/project-user-guide/storage/volumes.md @@ -14,7 +14,7 @@ weight: 10310 ## 准备工作 -- 您需要创建一个企业空间、一个项目和一个帐户(例如 `project-regular`)。该帐户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间、一个项目和一个用户(例如 `project-regular`)。该用户必须已邀请至该项目,并具有 `operator` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 - 如需使用动态存储卷供应,您需要配置一个支持动态供应的[存储类型](../../../cluster-administration/persistent-volume-and-storage-class/)。 @@ -30,53 +30,45 @@ weight: 10310 {{< notice note >}} - 您可以在对话框右上角启用**编辑模式**来查看存储卷的 YAML 清单文件,并通过直接编辑清单文件来创建存储卷。您也可继续执行后续步骤在控制台上创建存储卷。 + 您可以在对话框右上角启用**编辑 YAML** 来查看存储卷的 YAML 清单文件,并通过直接编辑清单文件来创建存储卷。您也可继续执行后续步骤在控制台上创建存储卷。 {{}} -4. 在**存储卷设置**页面,选择创建存储卷的方式。 +4. 在**存储设置**页面,选择创建存储卷的方式。 - - **通过存储类型**:您可以在 KubeSphere [安装前](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)或[安装后](../../../cluster-administration/persistent-volume-and-storage-class/)配置存储类型。 + - **通过存储类型创建**:您可以在 KubeSphere [安装前](../../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)或[安装后](../../../cluster-administration/persistent-volume-and-storage-class/)配置存储类型。 - **通过存储卷快照创建**:如需通过快照创建存储卷,您必须先创建存储卷快照。 - 选择**通过存储类型**。有关通过存储卷快照创建存储卷的更多信息,请参阅[存储卷快照](../volume-snapshots/)。 + 选择**通过存储类型创建**。有关通过存储卷快照创建存储卷的更多信息,请参阅[存储卷快照](../volume-snapshots/)。 5. 从下拉列表中选择存储类型。本教程以青云QingCloud 平台提供的 `csi-standard` 标准存储类型为例。您可以根据需要选择其他存储类型。 - ![select-storage-class](/images/docs/zh-cn/project-user-guide/volume-management/volumes/select-storage-class.jpg) - 6. 
由于一些 PersistentVolume 只支持特定的访问模式,页面上显示的访问模式会因您选择的存储类型而不同。访问模式一共有三种: - - **ReadWriteOnce (RWO)**:存储卷以单节点读写的形式挂载。 - - **ReadOnlyMany (ROX)**:存储卷以多节点只读的形式挂载。 - - **ReadWriteMany (RWX)**:存储卷以多节点读写的形式挂载。 + - **ReadWriteOnce**:存储卷以单节点读写的形式挂载。 + - **ReadOnlyMany**:存储卷以多节点只读的形式挂载。 + - **ReadWriteMany**:存储卷以多节点读写的形式挂载。 选择所需的访问模式。 7. 在**存储卷容量**区域设置存储卷的大小,然后点击**下一步**。 -8. 在**高级设置**页面,您可以为存储卷添加元数据,例如 **Label** 和 **Annotation**。元数据可用作搜索和调度资源的标识符。 +8. 在**高级设置**页面,您可以为存储卷添加元数据,例如**标签**和**注解**。元数据可用作搜索和调度资源的标识符。 9. 点击**创建**完成存储卷创建。 10. 新建的存储卷会显示在项目的**存储卷**页面。存储卷挂载至工作负载后,**挂载**列会显示为**已挂载**。 - ![volume-status](/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-status.jpg) - {{< notice note >}} -新建的存储卷也会显示在**集群管理**中的**存储卷**页面。通常情况下项目用户(例如 `project-regular`)无法查看该页面。集群管理员需要查看和跟踪项目中创建的存储卷。另一方面,集群管理员在**集群管理**中为项目创建的存储卷也会显示在项目的**存储卷**页面。 +新建的存储卷也会显示在**集群管理**中的**存储卷**页面。项目用户(例如 `project-regular`)可在**存储卷实例**列查看存储卷实例。集群管理员需要查看和跟踪项目中创建的存储卷。另一方面,集群管理员在**集群管理**中为项目创建的存储卷也会显示在项目的**存储卷**页面。 {{}} 11. 
一些存储卷是动态供应的存储卷,它们的状态会在创建后立刻从**等待中**变为**准备就绪**。其他仍处于**等待中**的存储卷会在挂载至工作负载后变为**准备就绪**。存储卷是否支持动态供应取决于其存储类型。 - ![local-pending](/images/docs/zh-cn/project-user-guide/volume-management/volumes/local-pending.jpg) - 例如,如果您使用默认的存储类型 (OpenEBS) 安装 KubeSphere,您只能创建不支持动态供应的本地存储卷。这类存储卷的绑定模式由 YAML 文件中的 `VolumeBindingMode: WaitForFirstConsumer` 字段指定。 - ![volumebindingmode](/images/docs/project-user-guide/volume-management/volumes/volumebindingmode.jpg) - ## 挂载存储卷 创建[部署](../../../project-user-guide/application-workloads/deployments/)、[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)和[守护进程集](../../../project-user-guide/application-workloads/daemonsets/)等应用负载时,您可以为它们挂载存储卷。 @@ -87,33 +79,31 @@ weight: 10310 {{}} -在**挂载存储**页面,您可以为工作负载挂载不同的存储卷。 +在**存储卷设置**页面,您可以为工作负载挂载不同的存储卷。 -![volume-page](/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-page.jpg) +- **添加存储卷模板**(仅对[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)可用):存储卷模板用于动态创建 PVC。您需要设置存储卷名称、存储类型、访问模式、存储卷容量和挂载路径(以上参数都由 `volumeClaimTemplates` 字段指定),以便将对应 StorageClass 的 PVC 挂载至容器组。 -- **添加存储卷模板**(仅对[有状态副本集](../../../project-user-guide/application-workloads/statefulsets/)可用):存储卷模板用于动态创建 PVC。您需要设置存储卷名称、存储类型、访问模式、存储卷容量和挂载路径(以上参数都由 `volumeClaimTemplates` 字段指定),以便将对应 StorageClass 的 PVC 挂载至 Pod。 +- **挂载存储卷**:支持 emptyDir 存储卷和 PVC。 -- **添加存储卷**:支持 emptyDir 存储卷和 PVC。 + **挂载存储卷**页面提供了三类存储卷: - **添加存储卷**页面提供了三类存储卷: - - - **已有存储卷**:用 PVC 挂载。 + - **现有存储卷**:用 PVC 挂载。 持久卷可用于保存用户的持久数据。您需要提前创建存储卷(PVC),存储卷创建后会显示在列表中供选择。 - **临时存储卷**:用 emptyDir 存储卷挂载。 - 临时存储卷即 [emptyDir](https://kubernetes.io/zh/docs/concepts/storage/volumes/#emptydir) 存储卷,它在 Pod 分配到节点时创建,并且只要 Pod 在节点上运行就会一直存在。emptyDir 存储卷提供了一个空目录,可由 Pod 中的容器读写。取决于您的部署环境,emptyDir 存储卷可以存放在节点所使用的任何介质上,例如机械硬盘或 SSD。当 Pod 由于某些原因从节点上移除时,emptyDir 存储卷中的数据也会被永久删除。 + 临时存储卷即 [emptyDir](https://kubernetes.io/zh/docs/concepts/storage/volumes/#emptydir) 存储卷,它在容器组分配到节点时创建,并且只要容器组在节点上运行就会一直存在。emptyDir 存储卷提供了一个空目录,可由容器组中的容器读写。取决于您的部署环境,emptyDir 
存储卷可以存放在节点所使用的任何介质上,例如机械硬盘或 SSD。当容器组由于某些原因从节点上移除时,emptyDir 存储卷中的数据也会被永久删除。 - - **HostPath**:用 hostPath 存储卷挂载。 + - **HostPath 存储卷**:用 hostPath 存储卷挂载。 - hostPath 存储卷将主机节点文件系统中的文件或目录挂载至 Pod。大多数 Pod 可能不需要这类存储卷,但它可以为一些应用提供了强大的逃生舱 (Escape Hatch)。有关更多信息,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/volumes/#hostpath)。 + hostPath 存储卷将主机节点文件系统中的文件或目录挂载至容器组。大多数容器组可能不需要这类存储卷,但它可以为一些应用提供了强大的逃生舱 (Escape Hatch)。有关更多信息,请参阅 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/storage/volumes/#hostpath)。 -- **挂载配置文件或密钥**:支持 [ConfigMap](../../../project-user-guide/configuration/configmaps/) 或[密钥 (Secret)](../../../project-user-guide/configuration/secrets/) 键值对。 +- **挂载配置文件或密钥**:支持[配置字典](../../../project-user-guide/configuration/configmaps/)或[保密字典](../../../project-user-guide/configuration/secrets/)键值对。 - [密钥](https://kubernetes.io/zh/docs/concepts/storage/volumes/#secret)存储卷用于为 Pod 提供密码、OAuth 凭证、SSH 密钥等敏感信息。密钥存储卷由 tmpfs(基于 RAM 的文件系统)支持,所以数据不会写入非易失性存储中。 + [保密字典](https://kubernetes.io/zh/docs/concepts/storage/volumes/#secret)存储卷用于为容器组提供密码、OAuth 凭证、SSH 密钥等敏感信息。密钥存储卷由 tmpfs(基于 RAM 的文件系统)支持,所以数据不会写入非易失性存储中。 - [ConfigMap](https://kubernetes.io/zh/docs/concepts/storage/volumes/#configmap) 存储卷以键值对的形式存放配置数据。ConfigMap 资源可用于向 Pod 中注入配置数据。存放在 ConfigMap 对象中的数据可以由 `configMap` 类型的存储卷引用,并由 Pod 中运行的容器化应用使用。ConfigMap 通常用于以下场景: + [配置字典](https://kubernetes.io/zh/docs/concepts/storage/volumes/#configmap)存储卷以键值对的形式存放配置数据。ConfigMap 资源可用于向容器组中注入配置数据。存放在 ConfigMap 对象中的数据可以由 `configMap` 类型的存储卷引用,并由容器组中运行的容器化应用使用。ConfigMap 通常用于以下场景: - 设置环境变量。 - 设置容器中的命令参数。 @@ -127,9 +117,7 @@ weight: 10310 在存储卷详情页面,您可以点击**编辑信息**修改存储卷的基本信息。点击**更多操作**可编辑 YAML 文件或删除存储卷。 -如需删除存储卷,请确保存储卷未挂载至任何工作负载。如需卸载存储卷,请进入工作负载的详情页面,点击**更多操作**,从下拉菜单中选择**编辑配置模板**,在弹出的对话框中选择**存储卷**,然后点击垃圾桶图标将存储卷卸载。 - -![delete-volume](/images/docs/zh-cn/project-user-guide/volume-management/volumes/delete-volume.jpg) +如需删除存储卷,请确保存储卷未挂载至任何工作负载。如需卸载存储卷,请进入工作负载的详情页面,点击**更多操作**,从下拉菜单中选择**编辑设置**,在弹出的对话框中选择**存储卷**,然后点击垃圾桶图标将存储卷卸载。 
在您点击**删除**后,如果存储卷的状态长时间保持为**删除中**,请使用以下命令手动删除: @@ -141,11 +129,9 @@ kubectl patch pvc -p '{"metadata":{"finalizers":null}}' **更多操作**下拉菜单提供了三个额外功能,这些功能基于 KubeSphere 的底层存储插件 `Storage Capability`。具体如下: -- **存储卷克隆**:创建一个相同的存储卷。 +- **克隆**:创建一个相同的存储卷。 - **创建快照**:创建一个存储卷快照,可用于创建其他存储卷。有关更多信息,请参阅[存储卷快照](../volume-snapshots/)。 -- **存储卷扩容**:增加存储卷的容量。请注意,您无法在控制台上减少存储卷的容量,因为数据可能会因此丢失。 - -![volume-detail-page](/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-detail-page.jpg) +- **扩容**:增加存储卷的容量。请注意,您无法在控制台上减少存储卷的容量,因为数据可能会因此丢失。 有关 `Storage Capability` 的更多信息,请参阅[设计文档](https://github.com/kubesphere/community/blob/master/sig-storage/concepts-and-designs/storage-capability-interface.md)。 @@ -159,6 +145,4 @@ kubectl patch pvc -p '{"metadata":{"finalizers":null}}' KubeSphere 从 Kubelet 获取 `Filesystem` 模式的 PVC 的指标数据(包括容量使用情况和 inode 使用情况),从而对存储卷进行监控。 -![volume-monitoring](/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-monitoring.jpg) - 有关存储卷监控的更多信息,请参阅 [Research on Volume Monitoring](https://github.com/kubesphere/kubesphere/issues/2921)。 diff --git a/content/zh/docs/quick-start/all-in-one-on-linux.md b/content/zh/docs/quick-start/all-in-one-on-linux.md index ec53225a8..f4ffdd5dd 100644 --- a/content/zh/docs/quick-start/all-in-one-on-linux.md +++ b/content/zh/docs/quick-start/all-in-one-on-linux.md @@ -150,7 +150,7 @@ KubeKey 是用 Go 语言开发的一款全新的安装工具,代替了以前 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) 下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -166,7 +166,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -181,7 +181,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号下载指定版本。 
+执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号下载指定版本。 {{}} @@ -202,12 +202,12 @@ chmod +x kk 若要同时安装 Kubernetes 和 KubeSphere,可参考以下示例命令: ```bash -./kk create cluster --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk create cluster --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` {{< notice note >}} -- 安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 +- 安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:1.19.x、1.20.x、1.21.x 或 1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 一般来说,对于 All-in-One 安装,您无需更改任何配置。 - 如果您在这一步的命令中不添加标志 `--with-kubesphere`,则不会部署 KubeSphere,KubeKey 将只安装 Kubernetes。如果您添加标志 `--with-kubesphere` 时不指定 KubeSphere 版本,则会安装最新版本的 KubeSphere。 - KubeKey 会默认安装 [OpenEBS](https://openebs.io/) 为开发和测试环境提供 LocalPV 以方便新用户。对于其他存储类型,请参见[持久化存储配置](../../installing-on-linux/persistent-storage-configurations/understand-persistent-storage/)。 @@ -258,9 +258,7 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx {{}} -登录至控制台后,您可以在**服务组件**中查看各个组件的状态。如果要使用相关服务,您可能需要等待部分组件启动并运行。您也可以使用 `kubectl get pod --all-namespaces` 来检查 KubeSphere 相关组件的运行状况。 - -![service-components](/images/docs/zh-cn/quickstart/all-in-one-on-linux/service-components.png) +登录至控制台后,您可以在**系统组件**中查看各个组件的状态。如果要使用相关服务,您可能需要等待部分组件启动并运行。您也可以使用 `kubectl get pod --all-namespaces` 来检查 KubeSphere 相关组件的运行状况。 ## 启用可插拔组件(可选) diff --git a/content/zh/docs/quick-start/create-workspace-and-project.md b/content/zh/docs/quick-start/create-workspace-and-project.md index a0e8bf8e9..0dd4ab094 100644 --- a/content/zh/docs/quick-start/create-workspace-and-project.md +++ b/content/zh/docs/quick-start/create-workspace-and-project.md @@ -1,12 +1,12 @@ --- -title: "创建企业空间、项目、帐户和角色" -keywords: 'KubeSphere, Kubernetes, 多租户, 企业空间, 帐户, 角色, 项目' +title: "创建企业空间、项目、用户和平台角色" 
+keywords: 'KubeSphere, Kubernetes, 多租户, 企业空间, 帐户, 平台角色, 项目' description: '了解如何利用 KubeSphere 中的多租户功能在不同级别进行细粒度访问控制。' -linkTitle: "创建企业空间、项目、帐户和角色" +linkTitle: "创建企业空间、项目、用户和平台角色" weight: 2300 --- -本快速入门演示如何创建企业空间、角色和用户帐户。同时,您将学习如何在企业空间中创建项目和 DevOps 工程,用于运行工作负载。完成本教程后,您将熟悉 KubeSphere 的多租户管理系统,并使用本教程中创建的资源(例如企业空间和帐户等)完成其他教程中的操作。 +本快速入门演示如何创建企业空间、用户和平台角色。同时,您将学习如何在企业空间中创建项目和 DevOps 项目,用于运行工作负载。完成本教程后,您将熟悉 KubeSphere 的多租户管理系统,并使用本教程中创建的资源(例如企业空间和帐户等)完成其他教程中的操作。 ## 准备工作 @@ -22,17 +22,17 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 ## 动手实验 -### 步骤 1:创建帐户 +### 步骤 1:创建用户 -安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个帐户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个帐户 `user-manager`,然后使用 `user-manager` 创建新帐户。 +安装 KubeSphere 之后,您需要向平台添加具有不同角色的用户,以便他们可以针对自己授权的资源在不同的层级进行工作。一开始,系统默认只有一个用户 `admin`,具有 `platform-admin` 角色。在本步骤中,您将创建一个用户 `user-manager`,然后使用 `user-manager` 创建新用户。 1. 以 `admin` 身份使用默认帐户和密码 (`admin/P@88w0rd`) 登录 Web 控制台。 {{< notice tip >}} - 出于安全考虑,强烈建议您在首次登录控制台时更改密码。若要更改密码,在右上角的下拉列表中选择**个人设置**,在**密码设置**中设置新密码,您也可以在**个人设置**中修改控制台语言。 + 出于安全考虑,强烈建议您在首次登录控制台时更改密码。若要更改密码,在右上角的下拉列表中选择**用户设置**,在**密码设置**中设置新密码,您也可以在**用户设置** > **基本信息**中修改控制台语言。 {{}} -2. 点击左上角的**平台管理**,然后选择**访问控制**。在左侧导航栏中,选择**帐户角色**。四个内置角色的描述信息如下表所示。 +2. 点击左上角的**平台管理**,然后选择**访问控制**。在左侧导航栏中,选择**平台角色**。四个内置角色的描述信息如下表所示。 @@ -64,11 +64,9 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 内置角色由 KubeSphere 自动创建,无法编辑或删除。 {{}} -3. 在**帐户管理**中,点击**创建**。在弹出的对话框中,提供所有必要信息(带有*标记),然后在**角色**一栏选择 `users-manager`。请参考下图示例。 +3. 在**用户**中,点击**创建**。在弹出的对话框中,提供所有必要信息(带有*标记),然后在**角色**一栏选择 `users-manager`。请参考下图示例。 - ![添加用户](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/添加用户.png) - - 完成后,点击**确定**。新创建的帐户将显示在**帐户管理**中的帐户列表中。 + 完成后,点击**确定**。新创建的帐户将显示在**用户**中的帐户列表中。 4. 切换帐户使用 `user-manager` 重新登录,创建如下四个新帐户,这些帐户将在其他的教程中使用。 @@ -95,22 +93,21 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 - + - +
    project-admin platform-regular创建和管理项目以及 DevOps 工程,并邀请新成员加入项目。创建和管理项目以及 DevOps 项目,并邀请新成员加入项目。
    project-regular platform-regularproject-regular 将由 project-admin 邀请至项目或 DevOps 工程。该帐户将用于在指定项目中创建工作负载、流水线和其他资源。project-regular 将由 project-admin 邀请至项目或 DevOps 项目。该帐户将用于在指定项目中创建工作负载、流水线和其他资源。
    5. 查看创建的四个帐户。 - ![帐户列表](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/帐户列表.png) ### 步骤 2:创建企业空间 -在本步骤中,您需要使用上一个步骤中创建的帐户 `ws-manager` 创建一个企业空间。作为管理项目、DevOps 工程和组织成员的基本逻辑单元,企业空间是 KubeSphere 多租户系统的基础。 +在本步骤中,您需要使用上一个步骤中创建的帐户 `ws-manager` 创建一个企业空间。作为管理项目、DevOps 项目和组织成员的基本逻辑单元,企业空间是 KubeSphere 多租户系统的基础。 1. 以 `ws-manager` 身份登录 KubeSphere,它具有管理平台上所有企业空间的权限。点击左上角的**平台管理**,选择**访问控制**。在**企业空间**中,可以看到仅列出了一个默认企业空间 `system-workspace`,即系统企业空间,其中运行着与系统相关的组件和服务,您无法删除该企业空间。 @@ -122,7 +119,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 {{}} -3. 登出控制台,然后以 `ws-admin` 身份重新登录。在**企业空间设置**中,选择**企业成员**,然后点击**邀请成员**。 +3. 登出控制台,然后以 `ws-admin` 身份重新登录。在**企业空间设置**中,选择**企业空间成员**,然后点击**邀请**。 4. 邀请 `project-admin` 和 `project-regular` 进入企业空间,分别授予 `workspace-self-provisioner` 和 `workspace-viewer` 角色,点击**确定**。 @@ -130,9 +127,7 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 实际角色名称的格式:`-`。例如,在名为 `demo-workspace` 的企业空间中,角色 `viewer` 的实际角色名称为 `demo-workspace-viewer`。 {{}} - ![邀请列表](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请列表.png) - -5. 将 `project-admin` 和 `project-regular` 都添加到企业空间后,点击**确定**。在**企业成员**中,您可以看到列出的三名成员。 +5. 将 `project-admin` 和 `project-regular` 都添加到企业空间后,点击**确定**。在**企业空间**中,您可以看到列出的三名成员。 @@ -149,11 +144,11 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 - + - +
    project-admin workspace-self-provisioner创建和管理项目以及 DevOps 工程,并邀请新成员加入项目。创建和管理项目以及 DevOps 项目,并邀请新成员加入项目。
    project-regular workspace-viewerproject-regular 将由 project-admin 邀请至项目或 DevOps 工程。该帐户将用于在指定项目中创建工作负载、流水线和其他资源。project-regular 将由 project-admin 邀请至项目或 DevOps 项目。该帐户将用于在指定项目中创建工作负载、流水线和其他资源。
    @@ -162,51 +157,45 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 在此步骤中,您需要使用在上一步骤中创建的帐户 `project-admin` 来创建项目。KubeSphere 中的项目与 Kubernetes 中的命名空间相同,为资源提供了虚拟隔离。有关更多信息,请参见[命名空间](https://kubernetes.io/zh/docs/concepts/overview/working-with-objects/namespaces/)。 -1. 以 `project-admin` 身份登录 KubeSphere Web 控制台,在**项目管理**中,点击**创建**。 +1. 以 `project-admin` 身份登录 KubeSphere Web 控制台,在**项目**中,点击**创建**。 2. 输入项目名称(例如 `demo-project`),然后点击**确定**完成,您还可以为项目添加别名和描述。 -3. 在**项目管理**中,点击刚创建的项目查看其详情页面。 +3. 在**项目**中,点击刚创建的项目查看其详情页面。 -4. 在项目的**概览**页面,默认情况下未设置项目配额。您可以点击**设置**并根据需要指定[资源请求和限制](../../workspace-administration/project-quotas/)(例如:CPU 和内存的限制分别设为 1 Core 和 1000 Gi)。 +4. 在项目的**概览**页面,默认情况下未设置项目配额。您可以点击**编辑配额**并根据需要指定[资源请求和限制](../../workspace-administration/project-quotas/)(例如:CPU 和内存的限制分别设为 1 Core 和 1000 Gi)。 - ![项目配额](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/项目配额.png) - -5. 邀请 `project-regular` 至该项目,并授予该用户 `operator` 角色。请参考下图以了解具体步骤。 - - ![邀请成员至项目](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请成员至项目.png) +5. 在**项目设置** > **项目成员**中,邀请 `project-regular` 至该项目,并授予该用户 `operator` 角色。 {{< notice info >}} 具有 `operator` 角色的用户是项目维护者,可以管理项目中除用户和角色以外的资源。 {{}} + +6. 在创建[应用路由](../../project-user-guide/application-workloads/routes/)(即 Kubernetes 中的 [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/))之前,需要启用该项目的网关。网关是在项目中运行的 [NGINX Ingress 控制器](https://github.com/kubernetes/ingress-nginx)。若要设置网关,请转到**项目设置**中的**网关设置**,然后点击**设置网关**。此步骤中仍使用帐户 `project-admin`。 -6. 在创建[应用路由](../../project-user-guide/application-workloads/routes/)(即 Kubernetes 中的 [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/))之前,需要启用该项目的网关。网关是在项目中运行的 [NGINX Ingress 控制器](https://github.com/kubernetes/ingress-nginx)。若要设置网关,请转到**项目设置**中的**高级设置**,然后点击**设置网关**。此步骤中仍使用帐户 `project-admin`。 +7. 选择访问方式 **NodePort**,然后点击**确定**。 -7. 选择访问方式 **NodePort**,然后点击**保存**。 - -8. 在**外网访问**下,可以在页面上看到网关地址以及 http/https 的端口。 +8. 
在**网关设置**下,可以在页面上看到网关地址以及 http/https 的端口。 {{< notice note >}} 如果要使用 `LoadBalancer` 暴露服务,则需要使用云厂商的 LoadBalancer 插件。如果您的 Kubernetes 集群在裸机环境中运行,建议使用 [OpenELB](https://github.com/kubesphere/openelb) 作为 LoadBalancer 插件。 {{}} - ![完成网关设置](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/完成网关设置.png) - ### 步骤 4:创建角色 完成上述步骤后,您已了解可以为不同级别的用户授予不同角色。先前步骤中使用的角色都是 KubeSphere 提供的内置角色。在此步骤中,您将学习如何创建自定义角色以满足工作需求。 1. 再次以 `admin` 身份登录 KubeSphere Web 控制台,转到**访问控制**。 -2. 点击左侧导航栏中的**帐户角色**,再点击右侧的**创建**。 +2. 点击左侧导航栏中的**平台角色**,再点击右侧的**创建**。 {{< notice note >}} - **帐户角色**页面的预设角色无法编辑或删除。 + **平台角色**页面的预设角色无法编辑或删除。 {{}} -3. 在**创建帐户角色**对话框中,设置角色标识符(例如,`clusters-admin`)、角色名称和描述信息,然后点击**编辑权限**。 +3. 在**创建平台角色**对话框中,设置角色标识符(例如,`clusters-admin`)、角色名称和描述信息,然后点击**编辑权限**。 {{< notice note >}} @@ -225,26 +214,25 @@ KubeSphere 的多租户系统分**三个**层级,即集群、企业空间和 {{}} -5. 在**帐户角色**页面,可以点击所创建角色的名称查看角色详情,点击 以编辑角色、编辑角色权限或删除该角色。 +5. 在**平台角色**页面,可以点击所创建角色的名称查看角色详情,点击 以编辑角色、编辑角色权限或删除该角色。 -6. 在**帐户管理**页面,可以在创建帐户或编辑现有帐户时为帐户分配该角色。 +6. 在**用户**页面,可以在创建帐户或编辑现有帐户时为帐户分配该角色。 -### 步骤 5:创建 DevOps 工程(可选) +### 步骤 5:创建 DevOps 项目(可选) {{< notice note >}} -若要创建 DevOps 工程,需要预先安装 KubeSphere DevOps 系统,该系统是个可插拔的组件,提供 CI/CD 流水线、Binary-to-Image 和 Source-to-Image 等功能。有关如何启用 DevOps 的更多信息,请参见 [KubeSphere DevOps 系统](../../pluggable-components/devops/)。 +若要创建 DevOps 项目,需要预先启用 KubeSphere DevOps 系统,该系统是个可插拔的组件,提供 CI/CD 流水线、Binary-to-Image 和 Source-to-Image 等功能。有关如何启用 DevOps 的更多信息,请参见 [KubeSphere DevOps 系统](../../pluggable-components/devops/)。 {{}} -1. 以 `project-admin` 身份登录控制台,在 **DevOps 工程**中,点击**创建**。 +1. 以 `project-admin` 身份登录控制台,在 **DevOps 项目**中,点击**创建**。 -2. 输入 DevOps 工程名称(例如 `demo-devops`),然后点击**确定**,也可以为该工程添加别名和描述。 +2. 输入 DevOps 项目名称(例如 `demo-devops`),然后点击**确定**,也可以为该项目添加别名和描述。 -3. 点击刚创建的工程查看其详细页面。 +3. 点击刚创建的项目查看其详细页面。 -4. 转到**工程管理**,然后选择**工程成员**。点击**邀请成员**授予 `project-regular` 用户 `operator` 的角色,允许其创建流水线和凭证。 +4. 
转到 **DevOps 项目设置**,然后选择 **DevOps 项目成员**。点击**邀请**授予 `project-regular` 用户 `operator` 的角色,允许其创建流水线和凭证。 - ![邀请devops成员](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请devops成员.png) -至此,您已熟悉 KubeSphere 的多租户管理系统。在其他教程中,`project-regular` 帐户还将用于演示如何在项目或 DevOps 工程中创建应用程序和资源。 +至此,您已熟悉 KubeSphere 的多租户管理系统。在其他教程中,`project-regular` 帐户还将用于演示如何在项目或 DevOps 项目中创建应用程序和资源。 diff --git a/content/zh/docs/quick-start/deploy-bookinfo-to-k8s.md b/content/zh/docs/quick-start/deploy-bookinfo-to-k8s.md index bb2d611a0..d5e807ee5 100644 --- a/content/zh/docs/quick-start/deploy-bookinfo-to-k8s.md +++ b/content/zh/docs/quick-start/deploy-bookinfo-to-k8s.md @@ -22,12 +22,12 @@ weight: 2400 - 您需要启用 [KubeSphere 服务网格](../../pluggable-components/service-mesh/)。 -- 您需要完成[创建企业空间、项目、帐户和角色](../create-workspace-and-project/)中的所有任务。 +- 您需要完成[创建企业空间、项目、用户和角色](../create-workspace-and-project/)中的所有任务。 -- 您需要启用**应用治理**。有关更多信息,请参见[设置网关](../../project-administration/project-gateway/#设置网关)。 +- 您需要启用**链路追踪**。有关更多信息,请参见[设置网关](../../project-administration/project-gateway/#设置网关)。 {{< notice note >}} - 您需要启用**应用治理**以使用追踪功能。启用后若无法访问路由 (Ingress),请检查您的路由是否已经添加注释(例如:`nginx.ingress.kubernetes.io/service-upstream: true`)。 + 您需要启用**链路追踪**以使用追踪功能。启用后若无法访问路由 (Ingress),请检查您的路由是否已经添加注释(例如:`nginx.ingress.kubernetes.io/service-upstream: true`)。 {{}} ## 什么是 Bookinfo 应用 @@ -49,9 +49,7 @@ Bookinfo 应用由以下四个独立的微服务组成,其中 **reviews** 微 1. 使用帐户 `project-regular` 登录控制台并访问项目 (`demo-project`)。前往**应用负载**下的**应用**,点击右侧的**部署示例应用**。 -2. 在出现的对话框中点击**下一步**,其中必填字段已经预先填好,相关组件也已经设置完成。您无需修改设置,只需在最后一页(**外网访问**)点击**创建**。 - - ![create-bookinfo](/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png) +2. 在出现的对话框中点击**下一步**,其中必填字段已经预先填好,相关组件也已经设置完成。您无需修改设置,只需在最后一页(**路由设置**)点击**创建**。 {{< notice note >}} @@ -61,8 +59,6 @@ KubeSphere 会自动创建主机名。若要更改主机名,请将鼠标悬停 3. 
在**工作负载**中,确保这四个部署都处于`运行中`状态,这意味着该应用已经成功创建。 - ![running](/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running.png) - {{< notice note >}}可能需要等几分钟才能看到部署正常运行。 {{}} @@ -75,8 +71,6 @@ KubeSphere 会自动创建主机名。若要更改主机名,请将鼠标悬停 2. 详情页面中显示了用于访问 Bookinfo 应用的主机名和端口号。 - ![details-page](/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/details-page.png) - 3. 由于将通过 NodePort 在集群外访问该应用,因此您需要在安全组中为出站流量开放上图中的端口,并按需设置端口转发规则。 4. 在本地 hosts 文件 (`/etc/hosts`) 中添加一个条目将主机名映射到对应的 IP 地址,例如: @@ -91,7 +85,7 @@ KubeSphere 会自动创建主机名。若要更改主机名,请将鼠标悬停 {{}} -5. 完成后,点击 访问该应用。 +5. 完成后,点击**访问服务**访问该应用。 6. 在应用详情页面,点击左下角的 **Normal user**。 diff --git a/content/zh/docs/quick-start/enable-pluggable-components.md b/content/zh/docs/quick-start/enable-pluggable-components.md index 4ba10bc0d..eb2d00177 100644 --- a/content/zh/docs/quick-start/enable-pluggable-components.md +++ b/content/zh/docs/quick-start/enable-pluggable-components.md @@ -50,7 +50,7 @@ weight: 2600 如果采用 [All-in-one 模式安装](../../quick-start/all-in-one-on-linux/),您无需创建 `config-sample.yaml` 文件,因为 all-in-one 模式可以通过一条命令直接创建集群。通常,all-in-one 模式适用于刚接触 KubeSphere 并希望快速上手该系统的用户。如果要在此模式下启用可插拔组件(例如,出于测试目的),请参考[在安装后启用可插拔组件](#在安装后启用可插拔组件)。 {{}} -2. 在此文件中,将 `enabled` 的值从 `false` 改为 `true`。这是[完整文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)供您参考,修改完成后保存文件。 +2. 在此文件中,将 `enabled` 的值从 `false` 改为 `true`。这是[完整文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)供您参考,修改完成后保存文件。 3. 使用该配置文件创建集群: @@ -73,14 +73,12 @@ weight: 2600 3. 
编辑完成后保存文件,执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml kubectl apply -f cluster-configuration.yaml ``` -无论是在 Linux 上还是在 Kubernetes 上安装 KubeSphere,安装后都可以在 KubeSphere 的 Web 控制台中检查已启用组件的状态。转到**服务组件**,可以看到类似如下图片: - -![服务组件](/images/docs/zh-cn/quickstart/enable-pluggable-components/服务组件.jpg) +无论是在 Linux 上还是在 Kubernetes 上安装 KubeSphere,安装后都可以在 KubeSphere 的 Web 控制台中检查已启用组件的状态。 ## 在安装后启用可插拔组件 @@ -94,19 +92,13 @@ weight: 2600 1. 以 `admin` 身份登录控制台。点击左上角的**平台管理** ,然后选择**集群管理**。 - ![集群管理](/images/docs/zh-cn/quickstart/enable-pluggable-components/集群管理.png) - -2. 点击**自定义资源 CRD**,然后在搜索栏中输入 `clusterconfiguration`,点击搜索结果进入其详情页面。 - - ![CRD](/images/docs/zh-cn/quickstart/enable-pluggable-components/CRD.png) +2. 点击 **CRD**,然后在搜索栏中输入 `clusterconfiguration`,点击搜索结果进入其详情页面。 {{< notice info >}} -自定义资源定义 (CRD) 允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些自定义资源。 +定制资源定义(CRD)允许用户在不增加额外 API 服务器的情况下创建一种新的资源类型,用户可以像使用其他 Kubernetes 原生对象一样使用这些定制资源。 {{}} -3. 在**资源列表**中,点击 `ks-installer` 右侧的三个点,然后选择**编辑配置文件**。 - - ![编辑配置文件](/images/docs/zh-cn/quickstart/enable-pluggable-components/编辑配置文件.png) +3. 在**自定义资源**中,点击 `ks-installer` 右侧的三个点,然后选择**编辑 YAML**。 4. 在该配置文件中,将对应组件 `enabled` 的 `false` 更改为 `true`,以启用要安装的组件。完成后,点击**更新**以保存配置。 @@ -146,9 +138,7 @@ weight: 2600 ##################################################### ``` -7. 登录 KubeSphere 控制台,在**服务组件**中可以查看不同组件的状态。 - - ![服务组件](/images/docs/zh-cn/quickstart/enable-pluggable-components/服务组件.jpg) +7. 
登录 KubeSphere 控制台,在**系统组件**中可以查看不同组件的状态。 {{< notice tip >}} diff --git a/content/zh/docs/quick-start/minimal-kubesphere-on-k8s.md b/content/zh/docs/quick-start/minimal-kubesphere-on-k8s.md index ce6ce06ff..f0ab5998d 100644 --- a/content/zh/docs/quick-start/minimal-kubesphere-on-k8s.md +++ b/content/zh/docs/quick-start/minimal-kubesphere-on-k8s.md @@ -10,7 +10,7 @@ weight: 2200 ## Prerequisites -- 如需在 Kubernetes 上安装 KubeSphere v3.1.1,您的 Kubernetes 版本必须为:1.17.x、1.18.x、1.19.x 或 1.20.x。 +- 如需在 Kubernetes 上安装 KubeSphere 3.2.1,您的 Kubernetes 版本必须为:1.19.x、1.20.x、1.21.x 或 1.22.x(实验性支持)。 - 确保您的机器满足最低硬件要求:CPU > 1 核,内存 > 2 GB。 - 在安装之前,需要配置 Kubernetes 集群中的**默认**存储类型。 @@ -28,9 +28,9 @@ weight: 2200 1. 执行以下命令开始安装: ```bash - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml - kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml + kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml ``` 2. 检查安装日志: @@ -47,9 +47,8 @@ weight: 2200 4. 确保在安全组中打开了端口 `30880`,并通过 NodePort `(IP:30880)` 使用默认帐户和密码 `(admin/P@88w0rd)` 访问 Web 控制台。 -5. 登录控制台后,您可以在**服务组件**中检查不同组件的状态。如果要使用相关服务,可能需要等待某些组件启动并运行。 +5. 
登录控制台后,您可以在**系统组件**中检查不同组件的状态。如果要使用相关服务,可能需要等待某些组件启动并运行。 - ![kubesphere-components](/images/docs/zh-cn/quickstart/minimal-kubesphere-on-k8s/kubesphere-components.png) ## 启用可插拔组件(可选) diff --git a/content/zh/docs/quick-start/wordpress-deployment.md b/content/zh/docs/quick-start/wordpress-deployment.md index 68bda5a52..a64137722 100644 --- a/content/zh/docs/quick-start/wordpress-deployment.md +++ b/content/zh/docs/quick-start/wordpress-deployment.md @@ -24,7 +24,7 @@ WordPress(使用 PHP 语言编写)是免费、开源的内容管理系统, ## 准备工作 -您需要准备一个 `project regular` 帐户,并在一个项目中赋予该帐户 `operator` 角色(该用户已被邀请参加该项目)。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../create-workspace-and-project/)。 +您需要准备一个 `project regular` 帐户,并在一个项目中赋予该帐户 `operator` 角色(该用户已被邀请参加该项目)。有关更多信息,请参见[创建企业空间、项目、用户和角色](../create-workspace-and-project/)。 ## 预计操作时间 @@ -38,95 +38,63 @@ WordPress(使用 PHP 语言编写)是免费、开源的内容管理系统, 环境变量 `WORDPRESS_DB_PASSWORD` 是连接到 WordPress 数据库的密码。在此步骤中,您需要创建一个密钥来保存将在 MySQL Pod 模板中使用的环境变量。 -1. 使用 `project-regular` 帐户登录 KubeSphere 控制台,访问 `demo-project` 的详情页并导航到**配置中心**。在**密钥**中,点击右侧的**创建**。 - - ![create-secret1](/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret1.png) +1. 使用 `project-regular` 帐户登录 KubeSphere 控制台,访问 `demo-project` 的详情页并导航到**配置**。在**保密字典**中,点击右侧的**创建**。 2. 输入基本信息(例如,将其命名为 `mysql-secret`)并点击**下一步**。在下一页中,选择**类型**为 **Opaque(默认)**,然后点击**添加数据**来添加键值对。输入如下所示的键 (Key) `MYSQL_ROOT_PASSWORD` 和值 (Value) `123456`,点击右下角 **√** 进行确认。完成后,点击**创建**按钮以继续。 - ![key-value](/images/docs/zh-cn/quickstart/wordpress-deployment/key-value.png) #### 创建 WordPress 密钥 -按照以上相同的步骤创建一个名为 `wordpress-secret` 的 WordPress 密钥,输入键 (Key) `WORDPRESS_DB_PASSWORD` 和值 (Value) `123456`。创建的密钥显示在列表中,如下所示: - -![wordpress-secrets1](/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets1.png) +按照以上相同的步骤创建一个名为 `wordpress-secret` 的 WordPress 密钥,输入键 (Key) `WORDPRESS_DB_PASSWORD` 和值 (Value) `123456`。创建的密钥显示在列表中。 ### 步骤 2:创建存储卷 -1. 
访问**存储管理**下的**存储卷**,点击**创建**。 - - ![volumes1](/images/docs/zh-cn/quickstart/wordpress-deployment/volumes1.png) +1. 访问**存储**下的**存储卷**,点击**创建**。 2. 输入卷的基本信息(例如,将其命名为 `wordpress-pvc`),然后点击**下一步**。 -3. 在**存储卷设置**中,需要选择一个可用的**存储类型**,并设置**访问模式**和**存储卷容量**。您可以直接使用如下所示的默认值,点击**下一步**继续。 +3. 在**存储卷设置**中,需要选择一个可用的**存储类型**,并设置**访问模式**和**存储卷容量**。您可以直接使用默认值,点击**下一步**继续。 - ![volume-settings1](/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings1.png) - -4. 对于**高级设置**,您无需为当前步骤添加额外的配置,点击**创建**完成即可。 +4. 在**高级设置**中,您无需添加额外的配置,点击**创建**完成即可。 ### 步骤 3:创建应用程序 #### 添加 MySQL 后端组件 -1. 导航到**应用负载**下的**应用**,选择**自制应用**,再点击**构建自制应用**。 - - ![composing-app1](/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app1.png) +1. 导航到**应用负载**下的**应用**,选择**自制应用** > **创建**。 2. 输入基本信息(例如,在应用名称一栏输入 `wordpress`),然后点击**下一步**。 - ![basic-info](/images/docs/zh-cn/quickstart/wordpress-deployment/basic-info.png) - -3. 在**服务组件**中,点击**添加服务**以在应用中设置组件。 - - ![add-service](/images/docs/zh-cn/quickstart/wordpress-deployment/add-service.png) +3. 在**服务设置**中,点击**创建服务**以在应用中设置组件。 4. 设置组件的服务类型为**有状态服务**。 5. 输入有状态服务的名称(例如 **mysql**)并点击**下一步**。 - ![mysqlname](/images/docs/zh-cn/quickstart/wordpress-deployment/mysqlname.png) - -6. 在**容器镜像**中,点击**添加容器镜像**。 - - ![container-image](/images/docs/zh-cn/quickstart/wordpress-deployment/container-image.png) +6. 在**容器组设置**中,点击**添加容器**。 7. 在搜索框中输入 `mysql:5.6`,按下**回车键**,然后点击**使用默认端口**。由于配置还未设置完成,请不要点击右下角的 **√** 按钮。 - ![add-container](/images/docs/zh-cn/quickstart/wordpress-deployment/add-container.png) - {{< notice note >}} 在**高级设置**中,请确保内存限制不小于 1000 Mi,否则 MySQL 可能因内存不足而无法启动。 {{}} 8. 向下滚动到**环境变量**,点击**引用配置文件或密钥**。输入名称 `MYSQL_ROOT_PASSWORD`,然后选择资源 `mysql-secret` 和前面步骤中创建的密钥 `MYSQL_ROOT_PASSWORD`,完成后点击 **√** 保存配置,最后点击**下一步**继续。 - ![environment-var](/images/docs/zh-cn/quickstart/wordpress-deployment/environment-var.png) - -9. 
选择**挂载存储**中的**添加存储卷模板**,输入**存储卷名称** (`mysql`) 和**挂载路径**(模式:`读写`,路径:`/var/lib/mysql`)的值,如下所示: - - ![volume-template1](/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template1.png) +9. 选择**存储卷设置**中的**添加存储卷模板**,输入**存储卷名称** (`mysql`) 和**挂载路径**(模式:`读写`,路径:`/var/lib/mysql`)的值。 完成后,点击 **√** 保存设置并点击**下一步**继续。 10. 在**高级设置**中,可以直接点击**添加**,也可以按需选择其他选项。 - ![advanced-setting1](/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting1.png) +11. 现在,MySQL 组件已经添加完成。 -11. 现在,MySQL 组件已经添加完成,如下所示: - - ![mysql-finished](/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-finished.png) #### 添加 WordPress 前端组件 -12. 再次点击**添加服务**,这一次选择**无状态服务**。输入名称 `wordpress` 并点击**下一步**。 +12. 再次点击**创建服务**,选择**无状态服务**。输入名称 `wordpress` 并点击**下一步**。 - ![name-wordpress](/images/docs/zh-cn/quickstart/wordpress-deployment/name-wordpress.png) - -13. 与上述步骤类似,点击**添加容器镜像**,在搜索栏中输入 `wordpress:4.8-apache` 并按下**回车键**,然后点击**使用默认端口**。 - - ![container-image-page](/images/docs/zh-cn/quickstart/wordpress-deployment/container-image-page.png) +13. 与上述步骤类似,点击**添加容器**,在搜索栏中输入 `wordpress:4.8-apache` 并按下**回车键**,然后点击**使用默认端口**。 14. 向下滚动到**环境变量**,点击**引用配置文件或密钥**。这里需要添加两个环境变量,请根据以下截图输入值: @@ -137,57 +105,32 @@ WordPress(使用 PHP 语言编写)是免费、开源的内容管理系统, 对于此处添加的第二个环境变量,该值必须与步骤 5 中创建 MySQL 有状态服务设置的名称完全相同。否则,WordPress 将无法连接到 MySQL 对应的数据库。 {{}} - ![environment-varss](/images/docs/zh-cn/quickstart/wordpress-deployment/environment-varss.png) - 点击 **√** 保存配置,再点击**下一步**继续。 -15. 在**挂载存储**中,点击**添加存储卷**,并**选择已有存储卷**。 - - ![add-volume-page](/images/docs/zh-cn/quickstart/wordpress-deployment/add-volume-page.png) - - ![choose-existing-volume](/images/docs/zh-cn/quickstart/wordpress-deployment/choose-existing-volume.png) +15. 在**存储卷设置**中,点击**挂载存储卷**,并**选择存储卷**。 16. 选择上一步创建的 `wordpress-pvc`,将模式设置为`读写`,并输入挂载路径 `/var/www/html`。点击 **√** 保存,再点击**下一步**继续。 - ![mount-volume-page](/images/docs/zh-cn/quickstart/wordpress-deployment/mount-volume-page.png) - 17. 
在**高级设置**中,可以直接点击**添加**创建服务,也可以按需选择其他选项。 - ![advanced1](/images/docs/zh-cn/quickstart/wordpress-deployment/advanced1.png) - 18. 现在,前端组件也已设置完成。点击**下一步**继续。 - ![components-finished](/images/docs/zh-cn/quickstart/wordpress-deployment/components-finished.png) +19. 您可以**路由设置**中设置路由规则(应用路由 Ingress),也可以直接点击**创建**。 -19. 您可以在这里设置路由规则(应用路由 Ingress),也可以直接点击**创建**。 +20. 创建后,应用将显示在应用列表中。 - ![ingress-create](/images/docs/zh-cn/quickstart/wordpress-deployment/ingress-create.png) - -20. 创建后,应用将显示在下面的列表中。 - - ![application-created1](/images/docs/zh-cn/quickstart/wordpress-deployment/application-created1.png) ### 步骤 4:验证资源 -在**工作负载**中,分别检查**部署**和**有状态副本集**中 `wordpress-v1` 和 `mysql-v1` 的状态。如果它们的运行状态如下图所示,就意味着 WordPress 已经成功创建。 - -![wordpress-deployment1](/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment1.png) - -![mysql-running1](/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running1.png) +在**工作负载**中,分别检查**部署**和**有状态副本集**中 `wordpress-v1` 和 `mysql-v1` 的状态。如果它们的运行状态为**运行中**,就意味着 WordPress 已经成功创建。 ### 步骤 5:通过 NodePort 访问 WordPress -1. 若要在集群外访问服务,请首先导航到**服务**。点击 `wordpress` 右侧的三个点后,选择**编辑外网访问**。 - - ![edit-internet-access1](/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access1.png) +1. 若要在集群外访问服务,请首先导航到**服务**。点击 `wordpress` 右侧的三个点后,选择**编辑外部访问**。 2. 在**访问方式**中选择 `NodePort`,然后点击**确定**。 - ![access-method](/images/docs/zh-cn/quickstart/wordpress-deployment/access-method.png) - -3. 点击服务进入详情页,可以看到暴露的端口。 - - ![nodeport-number1](/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number1.png) +3. 点击服务进入详情页,可以在**端口**处查看暴露的端口。 4. 
通过 `{Node IP}:{NodePort}` 访问此应用程序,可以看到下图: diff --git a/content/zh/docs/reference/api-docs.md b/content/zh/docs/reference/api-docs.md index fdc687a45..de11620e7 100644 --- a/content/zh/docs/reference/api-docs.md +++ b/content/zh/docs/reference/api-docs.md @@ -48,11 +48,13 @@ curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' \ --data-urlencode 'grant_type=password' \ --data-urlencode 'username=admin' \ --data-urlencode 'password=P#$$w0rd' + --data-urlencode 'client_id=kubesphere' \ + --data-urlencode 'client_secret=kubesphere' ``` {{< notice note >}} -将 `[node ip]` 替换为您的实际 IP 地址。 +将 `[node ip]` 替换为您的实际 IP 地址。你可以在 `ClusterConfiguration` 中配置客户端凭证, 存在一个默认的客户端凭证 `client_id` 和 `client_secret` 的值为 `kubesphere`。 {{}} diff --git a/content/zh/docs/reference/glossary.md b/content/zh/docs/reference/glossary.md index ceadcca4c..da8a2ecf1 100644 --- a/content/zh/docs/reference/glossary.md +++ b/content/zh/docs/reference/glossary.md @@ -11,7 +11,7 @@ weight: 17100 ## 通用术语 - **企业空间**
    - 管理租户工作负载项目(即 Kubernetes 中的企业空间)和 DevOps 工程的逻辑单位。不同团队的成员在企业空间中有不同的权限,可对资源执行不同的操作并共享信息。 + 管理租户工作负载项目(即 Kubernetes 中的企业空间)和 DevOps 项目的逻辑单位。不同团队的成员在企业空间中有不同的权限,可对资源执行不同的操作并共享信息。 - **系统企业空间**
    管理 KubeSphere、Kubernetes 以及可选组件(例如应用商店、服务网格和 DevOps 等)系统项目的特殊企业空间。 - **企业空间成员**
    邀请至企业空间中工作的用户,拥有特定的权限。 - **项目**
    @@ -47,17 +47,17 @@ weight: 17100 - **有状态副本集**
    有状态副本集是用于管理有状态应用程序的工作负载对象,例如 MySQL。有关更多信息,请参见[有状态副本集](https://kubernetes.io/zh/docs/concepts/workloads/controllers/statefulset/)。 -- **守护进程集**
    守护进程集管理多组 Pod 副本,确保所有(或某些)节点运行一个 Pod 的副本,例如 Fluentd 和 Logstash。有关更多信息,请参见[守护进程集](https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/)。 +- **守护进程集**
    守护进程集管理多组容器组副本,确保所有(或某些)节点运行一个容器组的副本,例如 Fluentd 和 Logstash。有关更多信息,请参见[守护进程集](https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/)。 -- **任务**
    任务会创建一个或者多个 Pod,并确保指定数量的 Pod 成功结束。有关更多信息,请参见[任务](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 +- **任务**
    任务会创建一个或者多个容器组,并确保指定数量的容器组成功结束。有关更多信息,请参见[任务](https://kubernetes.io/zh/docs/concepts/workloads/controllers/job/)。 - **定时任务**
    定时任务按照特定时间或特定时间间隔运行任务,定时任务对象就像 crontab 文件中的一行。有关更多信息,请参见[定时任务](https://kubernetes.io/zh/docs/concepts/workloads/controllers/cron-jobs/)。 -- **服务**
    Kubernetes 服务是一种抽象对象,定义一组逻辑 Pod 和访问它们的策略,有时也称为微服务。有关更多信息,请参见[服务](https://kubernetes.io/zh/docs/concepts/services-networking/service/)。 +- **服务**
    Kubernetes 服务是一种抽象对象,定义一组逻辑容器组和访问它们的策略,有时也称为微服务。有关更多信息,请参见[服务](https://kubernetes.io/zh/docs/concepts/services-networking/service/)。 ## DevOps -- **DevOps 工程**
    DevOps 工程用于创建和管理流水线和凭证。 +- **DevOps 项目**
    DevOps 项目用于创建和管理流水线和凭证。 - **SCM**
    源控制管理 (Source Control Management),例如 GitHub 和 Gitlab。 @@ -95,17 +95,17 @@ weight: 17100 监控集群中的相关指标,如节点状态、组件状态、CPU、内存、网络和硬盘等。 - **应用资源监控**
    - 监控平台上的应用程序资源,例如项目和 DevOps 工程的数量,以及特定类型的工作负载和服务的数量。 + 监控平台上的应用程序资源,例如项目和 DevOps 项目的数量,以及特定类型的工作负载和服务的数量。 - **已分配 CPU**
    - 该指标根据节点上 Pod 的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源,工作负载实际正在使用 CPU 资源可能低于该数值。 + 该指标根据节点上容器组的总 CPU 请求数计算得出。它表示节点上为工作负载预留的 CPU 资源,工作负载实际正在使用 CPU 资源可能低于该数值。 -- **已分配内存**
    该指标根据节点上 Pod 的总内存请求计算得出。它表示节点上为工作负载预留的内存资源,工作负载实际正在使用内存资源可能低于该数值。 +- **已分配内存**
    该指标根据节点上容器组的总内存请求计算得出。它表示节点上为工作负载预留的内存资源,工作负载实际正在使用内存资源可能低于该数值。 - **落盘日志收集**
    - 收集容器落盘日志并导出为 stdout,由系统日记收集器收集。 + 日志收集功能允许系统收集保存在存储卷上的容器日志,并将日志发送到标准输出。 -- **通知接收器**
    接收通知的渠道,如电子邮件、企业微信、Slack 和 Webhook。 +- **通知接收器**
    接收通知的渠道,如电子邮件、钉钉、企业微信、Slack 和 Webhook。 ## 网络 @@ -130,23 +130,23 @@ weight: 17100 ## 多集群管理 -- **Host 集群(H 集群)**
    - Host 集群管理 Member 集群,并提供统一的多集群中央控制平面。 +- **主集群(H 集群)**
    + 主集群管理成员集群,并提供统一的多集群中央控制平面。 -- **Member 集群(M 集群)**
    - Member 集群在多集群架构中由 Host 集群统一管理。 +- **成员集群(M 集群)**
    + 成员集群在多集群架构中由主集群统一管理。 - **直接连接**
    - 当 Host 集群的任意节点均可访问 Member 集群的 kube-apiserver 地址时可使用此方式直接连接 Host 集群和 Member 集群。 + 当主集群的任意节点均可访问成员集群的 kube-apiserver 地址时可使用此方式直接连接主集群和成员集群。 - **代理连接**
    - 当 Host 集群无法直接连接 Member 集群时可使用代理方式连接 Host 集群和 Member 集群。 + 当主集群无法直接连接成员集群时可使用代理方式连接主集群和成员集群。 - **jwtSecret**
    - Host 集群和 Member 集群所需的密钥以便二者通信。 + 主集群和成员集群所需的密钥以便二者通信。 - **Tower**
    - 使用代理连接时,Host 集群上会安装 proxy 组件而 Member 集群上会安装 agent,Tower 包含 proxy 和 agent。 + 使用代理连接时,主集群上会安装 proxy 组件而成员集群上会安装 agent,Tower 包含 proxy 和 agent。 - **代理服务地址**
    - 使用代理连接时,Member 集群上的 Tower agent 需要获取的 Host 集群的通信服务地址。 + 使用代理连接时,成员集群上的 Tower agent 需要获取的主集群的通信服务地址。 diff --git a/content/zh/docs/release/release-v310.md b/content/zh/docs/release/release-v310.md index 13384af48..e18d6db65 100644 --- a/content/zh/docs/release/release-v310.md +++ b/content/zh/docs/release/release-v310.md @@ -100,7 +100,7 @@ weight: 18200 - 可同时启动并运行多条流水线 [#1811](https://github.com/kubesphere/kubesphere/issues/1811) - 支持流水线复制 [#3053](https://github.com/kubesphere/kubesphere/issues/3053) - 新增权限可控的流水线审核机制 [#2483](https://github.com/kubesphere/kubesphere/issues/2483) [#3006](https://github.com/kubesphere/kubesphere/issues/3006) -- 访问 DevOps 工程首页可查看流水线运行状态 [#3007](https://github.com/kubesphere/kubesphere/issues/3007) +- 访问 DevOps 项目首页可查看流水线运行状态 [#3007](https://github.com/kubesphere/kubesphere/issues/3007) - 支持通过流水线 Tag 触发流水线运行 [#3051](https://github.com/kubesphere/kubesphere/issues/3051) - 支持 S2I Webhook [#6](https://github.com/kubesphere/s2ioperator/issues/6) - 优化在输入错误的流水线定时参数时的提示信息 [#2919](https://github.com/kubesphere/kubesphere/issues/2919) @@ -166,7 +166,7 @@ weight: 18200 - 修复通过 Proxy 方式下联邦多集群连接断开的问题 [#3202](https://github.com/kubesphere/kubesphere/pull/3203) - 修正多集群状态显示问题 [#3135](https://github.com/kubesphere/kubesphere/issues/3135) - 修复 DevOps 流水线中无法部署工作负载的问题 [#3112](https://github.com/kubesphere/kubesphere/issues/3112) -- 修复 DevOps 工程管理员无法下载 Artifact 的问题 [#3088](https://github.com/kubesphere/kubesphere/issues/3083) +- 修复 DevOps 项目管理员无法下载 Artifact 的问题 [#3088](https://github.com/kubesphere/kubesphere/issues/3083) - 修复 DevOps 无法创建流水线的问题 [#3105](https://github.com/kubesphere/kubesphere/issues/3105) - 修复多集群下流水线触发的问题 [#2626](https://kubesphere.com.cn/forum/d/2626-webhook-jenkins) - 修复某些情况下编辑流水线时导致的数据丢失问题 [#1270](https://github.com/kubesphere/console/issues/1270) diff --git a/content/zh/docs/release/release-v311.md b/content/zh/docs/release/release-v311.md index 9e7de3dc7..56ec9256d 100644 --- a/content/zh/docs/release/release-v311.md +++ 
b/content/zh/docs/release/release-v311.md @@ -21,7 +21,7 @@ weight: 18100 - 修复了在特定页面登出时无法正确重定向至登录页面的问题 [kubesphere/console#2009](https://github.com/kubesphere/console/pull/2009) - 修复了容器组模板编辑页面中协议下拉框显示不全的问题 [kubesphere/console#1944](https://github.com/kubesphere/console/pull/1944) - 修复了工作负载创建时探针格式校验的问题 [kubesphere/console#1941](https://github.com/kubesphere/console/pull/1941) -- 修复了企业空间成员详情页面中 DevOps 工程列表展示错误的问题 [#1936](https://github.com/kubesphere/console/pull/1936) +- 修复了企业空间成员详情页面中 DevOps 项目列表展示错误的问题 [#1936](https://github.com/kubesphere/console/pull/1936) - 修复文案错误、缺失的问题 [kubesphere/console#1879](https://github.com/kubesphere/console/pull/1879) [kubesphere/console#1880](https://github.com/kubesphere/console/pull/1880) [kubesphere/console#1895](https://github.com/kubesphere/console/pull/1895) ## 可观测性 @@ -69,9 +69,9 @@ weight: 18100 - 修复了 CI 自动推送镜像时 tag 错误的问题 [kubesphere/console#2037](https://github.com/kubesphere/console/pull/2037) - 修复了在流水线详情页不能返回上一个页面的问题 [kubesphere/console#1996](https://github.com/kubesphere/console/pull/1996) - 修复了镜像构建器弹窗名称不一致的问题 [kubesphere/console#1922](https://github.com/kubesphere/console/pull/1922) -- 修复了在 DevOps 工程中创建 kubeconfig 类型的证书更新被重置的问题 [kubesphere/console#1990](https://github.com/kubesphere/console/pull/1990) +- 修复了在 DevOps 项目中创建 kubeconfig 类型的证书更新被重置的问题 [kubesphere/console#1990](https://github.com/kubesphere/console/pull/1990) - 修复了多分支流水线中信任用户错误的问题 [kubesphere/console#1987](https://github.com/kubesphere/console/pull/1987) -- 修复了 DevOps 工程中流水线 stage label 在配置其他项不保存后被重置的问题 [kubesphere/console#1979](https://github.com/kubesphere/console/pull/1979) +- 修复了 DevOps 项目中流水线 stage label 在配置其他项不保存后被重置的问题 [kubesphere/console#1979](https://github.com/kubesphere/console/pull/1979) - 修复了 shell 和 lable 在流水线中显示不准确的问题 [kubesphere/console#1970](https://github.com/kubesphere/console/pull/1970) - 修复了流水线基础信息对话框显示信息混乱的问题 [kubesphere/console#1955](https://github.com/kubesphere/console/pull/1955) - 修复了多分支流水线运行 API 错误的问题 
[kubesphere/console#1954](https://github.com/kubesphere/console/pull/1954) diff --git a/content/zh/docs/release/release-v320.md b/content/zh/docs/release/release-v320.md new file mode 100644 index 000000000..4ebf04e41 --- /dev/null +++ b/content/zh/docs/release/release-v320.md @@ -0,0 +1,178 @@ +--- +title: "3.2.0 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.2.0 版本说明" +linkTitle: "3.2.0 版本说明" +weight: 18100 +--- + +## 多租户和多集群 + +### 新特性 + +- 新增支持在多集群场景设置主集群名称(默认值为 `host`)。([#4211](https://github.com/kubesphere/kubesphere/pull/4211),[@yuswift](https://github.com/yuswift)) +- 新增支持在单集群场景设置集群名称。([#4220](https://github.com/kubesphere/kubesphere/pull/4220),[@yuswift](https://github.com/yuswift)) +- 新增支持使用 `globals.config` 初始化默认集群名称。([#2283](https://github.com/kubesphere/console/pull/2283),[@harrisonliu5](https://github.com/harrisonliu5)) +- 新增支持创建部署时跨多个集群调度容器组副本。([#2191](https://github.com/kubesphere/console/pull/2191),[@weili520](https://github.com/weili520)) +- 新增支持在项目详情页面修改集群权重。([#2192](https://github.com/kubesphere/console/pull/2192),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 修复**集群管理**的**创建部署**对话框中可以通过输入项目名称选择多集群项目的问题。([#2125](https://github.com/kubesphere/console/pull/2125),[@fuchunlan](https://github.com/fuchunlan)) +- 修复编辑企业空间或集群基本信息时发生的错误。([#2188](https://github.com/kubesphere/console/pull/2188), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 移除主集群**基本信息**页面上有关已删除集群的信息。([#2211](https://github.com/kubesphere/console/pull/2211),[@fuchunlan](https://github.com/fuchunlan)) +- 新增支持在多集群项目中对服务进行排序和编辑。([#2167](https://github.com/kubesphere/console/pull/2167),[@harrisonliu5](https://github.com/harrisonliu5)) +- 重构多集群项目的网关功能。([#2275](https://github.com/kubesphere/console/pull/2275),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复删除企业空间后多集群项目无法删除的问题。([#4365](https://github.com/kubesphere/kubesphere/pull/4365),[@wansir](https://github.com/wansir)) + +## 可观测性 + +### 新特性 + +- 新增支持与 Elasticsearch 进行 HTTPS 
通信。([#4176](https://github.com/kubesphere/kubesphere/pull/4176),[@wanjunlei](https://github.com/wanjunlei)) +- 新增支持在调度 GPU 工作负载时设置 GPU 类型。([#4225](https://github.com/kubesphere/kubesphere/pull/4225),[@zhu733756](https://github.com/zhu733756)) +- 新增支持验证通知设置。([#4216](https://github.com/kubesphere/kubesphere/pull/4216),[@wenchajun](https://github.com/wenchajun)) +- 新增支持通过指定监控面板 URL 或上传 Grafana 监控面板 JSON 配置文件导入 Grafana 监控面板。KubeSphere 自动将 Grafana 监控面板转换为 KubeSphere 集群监控面板。([#4194](https://github.com/kubesphere/kubesphere/pull/4194),[@zhu733756](https://github.com/zhu733756)) +- 新增支持在**自定义监控**页面创建 Grafana 监控面板。([#2214](https://github.com/kubesphere/console/pull/2214),[@harrisonliu5](https://github.com/harrisonliu5)) +- 优化**通知配置**功能。([#2261](https://github.com/kubesphere/console/pull/2261), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 新增支持在**编辑默认容器配额**对话框中设置 GPU 限制。([#2253](https://github.com/kubesphere/console/pull/2253),[@weili520](https://github.com/weili520)) +- 新增默认 GPU 监控面板。([#2580](https://github.com/kubesphere/console/pull/2580),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在 etcd 监控页面对 etcd leader 增加 **Leader** 标签。([#2445](https://github.com/kubesphere/console/pull/2445), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) + +### 问题修复 + +- 修复**告警消息**页面和告警策略详情页面容器组信息错误的问题。([#2215](https://github.com/kubesphere/console/pull/2215),[@harrisonliu5](https://github.com/harrisonliu5)) + +## 身份验证和授权 + +### 新特性 + +- 新增内置 OAuth 2.0 服务器(支持 OpenID Connect)。([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) +- 移除使用外部身份认证提供者时所需的信息确认过程。([#4238](https://github.com/kubesphere/kubesphere/pull/4238),[@wansir](https://github.com/wansir)) + +### 问题修复 + +- 修复登录历史记录中源 IP 地址错误的问题。([#4331](https://github.com/kubesphere/kubesphere/pull/4331),[@wansir](https://github.com/wansir)) + +## 存储 + +### 新特性 + +- 
变更用于确定是否允许存储卷克隆、存储卷快照和存储卷扩展的参数。([#2199](https://github.com/kubesphere/console/pull/2199),[@weili520](https://github.com/weili520)) +- 新增支持创建存储卷时设置存储卷绑定模式。([#2220](https://github.com/kubesphere/console/pull/2220),[@weili520](https://github.com/weili520)) +- 新增存储卷实例管理功能。([#2226](https://github.com/kubesphere/console/pull/2226),[@weili520](https://github.com/weili520)) +- 新增支持多个存储卷快照类型。用户可以在创建存储卷快照时选择快照类型。([#2218](https://github.com/kubesphere/console/pull/2218),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 更改**存储卷设置**页签上存储卷访问模式的可选项。([#2348](https://github.com/kubesphere/console/pull/2348),[@live77](https://github.com/live77)) + +## 网络 + +### 新特性 + +- 在应用路由列表页面新增应用路由排序、路由规则编辑和注解编辑功能。([#2165](https://github.com/kubesphere/console/pull/2165),[@harrisonliu5](https://github.com/harrisonliu5)) +- 重构群集网关和项目网关功能。([#2262](https://github.com/kubesphere/console/pull/2262),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在路由规则创建过程中新增服务名称自动补全功能。([#2196](https://github.com/kubesphere/console/pull/2196),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 对 ks-console 进行了以下 DNS 优化: + - 直接使用 ks-apiserver 服务的名称作为 API URL,不再使用 `ks-apiserver.kubesphere-system.svc`。 + - 新增 DNS 缓存插件 (dnscache) 用于缓存 DNS 结果。([#2435](https://github.com/kubesphere/console/pull/2435),[@live77](https://github.com/live77)) + +### 问题修复 + +- 在**启用网关**对话框中新增**取消**按钮。([#2245](https://github.com/kubesphere/console/pull/2245),[@weili520](https://github.com/weili520)) + +## 应用和应用商店 + +### 新特性 + +- 新增支持在应用仓库创建和编辑过程中设置同步时间间隔。([#2311](https://github.com/kubesphere/console/pull/2311), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 在应用商店增加免责声明。([#2173](https://github.com/kubesphere/console/pull/2173), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 新增支持将社区开发的 Helm chart 动态加载到应用商店。([#4250](https://github.com/kubesphere/kubesphere/pull/4250),[@xyz-li](https://github.com/xyz-li)) + +### 问题修复 + +- 修复调用 `GetKubeSphereStats` 时 `kubesphere_app_template_count` 的值始终为 `0` 
的问题。([#4130](https://github.com/kubesphere/kubesphere/pull/4130),[@x893675](https://github.com/x893675)) + +## DevOps + +### 新特性 + +- 设置系统在当前流水线不是多分支流水线时隐藏**运行记录**页签的**分支**列。([#2379](https://github.com/kubesphere/console/pull/2379),[@live77](https://github.com/live77)) +- 新增自动从 ConfigMaps 加载 Jenkins 配置的功能。([#75](https://github.com/kubesphere/ks-devops/pull/75),[@JohnNiang](https://github.com/JohnNiang)) +- 新增支持通过操纵 CRD 而不是调用 Jenkins API 来触发流水线。([#41](https://github.com/kubesphere/ks-devops/issues/41), [@rick](https://github.com/LinuxSuRen)) +- 新增支持基于 containerd 的流水线。([#171](https://github.com/kubesphere/ks-devops/pull/171), [@rick](https://github.com/LinuxSuRen)) +- 将 Jenkins 任务元数据添加到流水线注解中。([#254](https://github.com/kubesphere/ks-devops/issues/254),[@JohnNiang](https://github.com/JohnNiang)) + +### 问题修复 + +- 修复参数值过长时凭证创建和更新失败的问题。([#123](https://github.com/kubesphere/ks-devops/pull/123),[@shihh](https://github.com/shihaoH)) +- 修复打开并行流水线**运行记录**页签时 ks-apiserver 崩溃的问题。([#93](https://github.com/kubesphere/ks-devops/pull/93),[@JohnNiang](https://github.com/JohnNiang)) + +### 依赖项升级 + +- 升级 Configuration as Code 版本到 1.53。([#42](https://github.com/kubesphere/ks-jenkins/pull/42), [@rick](https://github.com/LinuxSuRen)) + +## 安装 + +### 新特性 + +- 新增支持 Kubernetes 1.21.5 和 1.22.1。([#634](https://github.com/kubesphere/kubekey/pull/634),[@pixiake](https://github.com/pixiake)) +- 新增支持自动设置容器运行时。([#738](https://github.com/kubesphere/kubekey/pull/738),[@pixiake](https://github.com/pixiake)) +- 新增支持自动更新 Kubernetes 证书。([#705](https://github.com/kubesphere/kubekey/pull/705),[@pixiake](https://github.com/pixiake)) +- 新增支持使用二进制文件安装 Docker 和 containerd。([#657](https://github.com/kubesphere/kubekey/pull/657),[@pixiake](https://github.com/pixiake)) +- 新增支持 Flannel VxLAN 和直接路由。([#606](https://github.com/kubesphere/kubekey/pull/606),[@kinglong08](https://github.com/kinglong08)) +- 新增支持使用二进制文件部署 
etcd。([#634](https://github.com/kubesphere/kubekey/pull/634),[@pixiake](https://github.com/pixiake)) +- 新增内部负载均衡器用于部署高可用系统。([#567](https://github.com/kubesphere/kubekey/pull/567),[@24sama](https://github.com/24sama)) + +### 问题修复 + +- 修复 `runtime.RawExtension` 序列化错误。([#731](https://github.com/kubesphere/kubekey/pull/731),[@pixiake](https://github.com/pixiake)) +- 修复群集升级期间出现的空指针错误。([#684](https://github.com/kubesphere/kubekey/pull/684),[@24sama](https://github.com/24sama)) +- 新增支持更新 Kubernetes 1.20.0 及以上版本的证书。([#690](https://github.com/kubesphere/kubekey/pull/690),[@24sama](https://github.com/24sama)) +- 修复 DNS 地址配置错误。([#637](https://github.com/kubesphere/kubekey/pull/637),[@pixiake](https://github.com/pixiake)) +- 修复缺少默认网关地址时出现的群集创建错误。([#661](https://github.com/kubesphere/kubekey/pull/661),[@liulangwa](https://github.com/liulangwa)) + +## 用户体验 + +- 修复语言错误并优化措辞。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 修复错误的功能说明。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 删除硬编码和拼接 UI 字符串,以更好地支持 UI 本地化和国际化。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 添加条件语句以显示正确的英文单复数形式。([@Patrick-LuoYu](https://github.com/Patrick-LuoYu)、[@Felixnoo](https://github.com/Felixnoo)、[@serenashe](https://github.com/serenashe)) +- 优化**创建部署**对话框中的**容器组调度规则**区域。([#2170](https://github.com/kubesphere/console/pull/2170),[@qinyueshang](https://github.com/qinyueshang)) +- 修复**编辑项目配额**中配额值设置为无穷大时值变为 `0` 的问题。([#2118](https://github.com/kubesphere/console/pull/2118),[@fuchunlan](https://github.com/fuchunlan)) +- 修复**创建配置字典**对话框中数据条目为空时锤子图标位置不正确的问题。([#2206](https://github.com/kubesphere/console/pull/2206),[@fuchunlan](https://github.com/fuchunlan)) +- 
修复项目**概览**页面时间范围下拉列表默认值显示错误的问题。([#2340](https://github.com/kubesphere/console/pull/2340),[@fuchunlan](https://github.com/fuchunlan)) +- 修复 `referer` URL 包含 & 字符时登录重定向失败的问题。([#2194](https://github.com/kubesphere/console/pull/2194),[@harrisonliu5](https://github.com/harrisonliu5)) +- 在自定义监控面板创建页面将 **1 hours** 修改为 **1 hour**。([#2276](https://github.com/kubesphere/console/pull/2276),[@live77](https://github.com/live77)) +- 修复服务列表页面服务类型显示错误的问题。([#2178](https://github.com/kubesphere/console/pull/2178), [@xuliwenwenwen](https://github.com/xuliwenwenwen)) +- 修复灰度发布任务详细信息中流量数据显示错误的问题。([#2422](https://github.com/kubesphere/console/pull/2422),[@harrisonliu5](https://github.com/harrisonliu5)) +- 解决**编辑项目配额**对话框中无法设置带两位小数或大于 8 的值的问题。([#2127](https://github.com/kubesphere/console/pull/2127),[@weili520](https://github.com/weili520)) +- 允许通过单击窗口其他区域关闭**关于**对话框。([#2114](https://github.com/kubesphere/console/pull/2114),[@fuchunlan](https://github.com/fuchunlan)) +- 优化项目标题,使光标悬停在项目标题上时变为手形。([#2128](https://github.com/kubesphere/console/pull/2128),[@fuchunlan](https://github.com/fuchunlan)) +- 新增支持在**创建部署**对话框的**环境变量**区域创建配置字典和保密字典。([#2227](https://github.com/kubesphere/console/pull/2227),[@harrisonliu5](https://github.com/harrisonliu5)) +- 新增支持在**创建部署**对话框中设置容器组注解。([#2129](https://github.com/kubesphere/console/pull/2129),[@harrisonliu5](https://github.com/harrisonliu5)) +- 允许域名以星号(*)开头。([#2432](https://github.com/kubesphere/console/pull/2432),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 新增支持在**创建部署**对话框搜索 Harbor 镜像。([#2132](https://github.com/kubesphere/console/pull/2132),[@wengzhisong-hz](https://github.com/wengzhisong-hz)) +- 新增支持为初始化容器挂载存储卷。([#2166](https://github.com/kubesphere/console/pull/2166),[@Sigboom](https://github.com/Sigboom)) +- 移除存储卷扩展过程中工作负载自动重新启动的功能。([#4121](https://github.com/kubesphere/kubesphere/pull/4121),[@wenhuwang](https://github.com/wenhuwang)) + +## API + +- 弃用 router API v1alpha2 
版本。([#4193](https://github.com/kubesphere/kubesphere/pull/4193),[@RolandMa1986](https://github.com/RolandMa1986)) +- 将流水线 API 版本从 v2 升级到 v3。([#2323](https://github.com/kubesphere/console/pull/2323),[@harrisonliu5](https://github.com/harrisonliu5)) +- 更改保密字典校验 API。([#2368](https://github.com/kubesphere/console/pull/2368),[@harrisonliu5](https://github.com/harrisonliu5)) +- OAuth2 Token endpoint 需要客户端凭证。([#3525](https://github.com/kubesphere/kubesphere/pull/3525),[@wansir](https://github.com/wansir)) + +## 组件更改 + +- kubefed: v0.7.0 -> v0.8.1 +- prometheus-operator: v0.42.1 -> v0.43.2 +- notification-manager: v1.0.0 -> v1.4.0 +- fluent-bit: v1.6.9 -> v1.8.3 +- kube-events: v0.1.0 -> v0.3.0 +- kube-auditing: v0.1.2 -> v0.2.0 +- istio: 1.6.10 -> 1.11.1 +- jaeger: 1.17 -> 1.27 +- kiali: v1.26.1 -> v1.38 +- KubeEdge: v1.6.2 -> 1.7.2 \ No newline at end of file diff --git a/content/zh/docs/release/release-v321.md b/content/zh/docs/release/release-v321.md new file mode 100644 index 000000000..e0245e9e1 --- /dev/null +++ b/content/zh/docs/release/release-v321.md @@ -0,0 +1,41 @@ +--- +title: "3.2.1 版本说明" +keywords: "Kubernetes, KubeSphere, 版本说明" +description: "KubeSphere 3.2.1 版本说明" +linkTitle: "3.2.1 版本说明" +weight: 18099 +--- + +## 功能优化与问题修复 + +### 功能优化 + +- 新增支持按状态过滤容器组。([#4434](https://github.com/kubesphere/kubesphere/pull/4434),[@iawia002](https://github.com/iawia002),[#2620](https://github.com/kubesphere/console/pull/2620),[@weili520](https://github.com/weili520)) +- 在镜像构建器创建对话框中增加不支持 containerd 的提示。([#2734](https://github.com/kubesphere/console/pull/2734),[@weili520](https://github.com/weili520)) +- 在**编辑项目配额**对话框中增加可用配额信息。([#2619](https://github.com/kubesphere/console/pull/2619),[@weili520](https://github.com/weili520)) + +### 问题修复 + +- 更改密码校验规则以阻止不包含大写字母的密码。([#4481](https://github.com/kubesphere/kubesphere/pull/4481),[@live77](https://github.com/live77)) +- 修复 KubeSphere 上不存在相关用户信息时,无法使用来自 LDAP 
的用户登录的问题。([#4436](https://github.com/kubesphere/kubesphere/pull/4436),[@RolandMa1986](https://github.com/RolandMa1986)) +- 修复无法获取集群网关指标信息的问题。([#4457](https://github.com/kubesphere/kubesphere/pull/4457),[@RolandMa1986](https://github.com/RolandMa1986)) +- 修复存储卷列表访问模式显示不正确的问题。([#2686](https://github.com/kubesphere/console/pull/2686),[@weili520](https://github.com/weili520)) +- 移除**网关设置**页面的**更新**按钮。([#2608](https://github.com/kubesphere/console/pull/2608),[@weili520](https://github.com/weili520)) +- 修复时间范围选择下拉列表显示错误的问题。([#2715](https://github.com/kubesphere/console/pull/2715),[@weili520](https://github.com/weili520)) +- 修复保密字典数据文本过长时文本显示不正确的问题。([#2600](https://github.com/kubesphere/console/pull/2600),[@weili520](https://github.com/weili520)) +- 修复挂载存储卷模板时有状态副本集创建失败的问题。([#2730](https://github.com/kubesphere/console/pull/2730),[@weili520](https://github.com/weili520)) +- 修复用户没有查看群集信息的权限时系统无法获取集群网关信息的问题。([#2695](https://github.com/kubesphere/console/pull/2695),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复流水线状态和运行记录无法自动更新的问题。([#2594](https://github.com/kubesphere/console/pull/2594),[@harrisonliu5](https://github.com/harrisonliu5)) +- 对 kubernetesDeply 流水线步骤增加该步骤将被弃用的提示。([#2660](https://github.com/kubesphere/console/pull/2660),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复镜像仓库保密字典使用 HTTP 仓库地址时无法通过验证的问题。([#2795](https://github.com/kubesphere/console/pull/2795),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复 Harbor 镜像 URL 错误的问题。([#2784](https://github.com/kubesphere/console/pull/2784),[@harrisonliu5](https://github.com/harrisonliu5)) +- 修复日志搜索结果显示错误的问题。([#2598](https://github.com/kubesphere/console/pull/2598),[@weili520](https://github.com/weili520)) +- 修复存储卷实例 YAML 配置中的错误。([#2629](https://github.com/kubesphere/console/pull/2629),[@weili520](https://github.com/weili520)) +- 修复**编辑项目配额**对话框中可用企业空间配额显示不正确的问题。([#2613](https://github.com/kubesphere/console/pull/2613),[@weili520](https://github.com/weili520)) +- 
修复**监控**对话框中时间范围选择下拉列表功能不正常的问题。([#2722](https://github.com/kubesphere/console/pull/2722),[@weili520](https://github.com/weili520)) +- 修复部署创建页面可用配额显示不正确的问题。([#2668](https://github.com/kubesphere/console/pull/2668),[@weili520](https://github.com/weili520)) +- 将文档地址更改为 [kubesphere.io](http://kubesphere.io) 和 [kubesphere.com.cn](http://kubesphere.com.cn)。([#2628](https://github.com/kubesphere/console/pull/2628),[@weili520](https://github.com/weili520)) +- 修复无法修改部署存储卷设置的问题。([#2656](https://github.com/kubesphere/console/pull/2656),[@weili520](https://github.com/weili520)) +- 修复浏览器语言必须为英文、简体中文或繁体中文时才能访问容器终端的问题。([#2702](https://github.com/kubesphere/console/pull/2702),[@weili520](https://github.com/weili520)) +- 修复部署编辑对话框中存储卷状态显示不正确的问题。([#2622](https://github.com/kubesphere/console/pull/2622),[@weili520](https://github.com/weili520)) +- 移除凭证详情页面显示的标签。([#2621](https://github.com/kubesphere/console/pull/2621),[@123liubao](https://github.com/123liubao)) \ No newline at end of file diff --git a/content/zh/docs/toolbox/_index.md b/content/zh/docs/toolbox/_index.md index 304dd3248..d81779be5 100644 --- a/content/zh/docs/toolbox/_index.md +++ b/content/zh/docs/toolbox/_index.md @@ -11,5 +11,3 @@ icon: "/images/docs/docs.svg" --- KubeSphere 通过工具箱提供几种重要功能。本章演示了如何使用 KubeSphere 工具箱查询事件、日志和审计日志,查看资源消费情况,以及如何通过 Web Kubectl 运行命令。 - -![toolbox](/images/docs/zh-cn/toolbox/index/toolbox.PNG) \ No newline at end of file diff --git a/content/zh/docs/toolbox/auditing/auditing-query.md b/content/zh/docs/toolbox/auditing/auditing-query.md index 28a651eda..ae333b105 100644 --- a/content/zh/docs/toolbox/auditing/auditing-query.md +++ b/content/zh/docs/toolbox/auditing/auditing-query.md @@ -14,23 +14,21 @@ KubeSphere 支持租户隔离的审计日志查询。本教程演示了如何使 ## 进入查询界面 -1. 所有用户都可以使用该查询功能。使用任意帐户登录控制台,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择**操作审计**。 +1. 
所有用户都可以使用该查询功能。使用任意帐户登录控制台,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择**审计日志查询**。 {{< notice note >}} 任意帐户都有权限查询审计日志,但每个帐户能查看的日志有区别。 -- 如果一个帐户有权限查看项目中的资源,该帐户便可以查看此项目中发生的审计日志,例如在项目中创建工作负载。 -- 如果一个帐户有权限在企业空间中列出项目,该帐户便可以查看此企业空间(而非项目)中发生的审计日志,例如在企业空间中创建项目。 -- 如果一个帐户有权限在集群中列出项目,该帐户便可以查看此集群(而非企业空间和项目)中发生的审计日志,例如在集群中创建企业空间。 +- 如果一个用户有权限查看项目中的资源,该用户便可以查看此项目中发生的审计日志,例如在项目中创建工作负载。 +- 如果一个用户有权限在企业空间中列出项目,该用户便可以查看此企业空间(而非项目)中发生的审计日志,例如在企业空间中创建项目。 +- 如果一个用户有权限在集群中列出项目,该用户便可以查看此集群(而非企业空间和项目)中发生的审计日志,例如在集群中创建企业空间。 {{}} 2. 在弹出窗口中,您可以查看最近 12 小时内审计日志总数的趋势。 - ![操作审计](/images/docs/zh-cn/toolbox/auditing/auditing-logs/操作审计.png) - -3. **操作审计**控制台支持以下查询参数: +3. **审计日志查询**控制台支持以下查询参数: @@ -82,10 +80,6 @@ KubeSphere 支持租户隔离的审计日志查询。本教程演示了如何使 ## 输入查询参数 -1. 选择一个过滤器,输入您想搜索的关键字。例如,查询包含 `services` 创建信息的审计日志,如下方截图所示: - - ![过滤审计日志](/images/docs/zh-cn/toolbox/auditing/auditing-logs/过滤审计日志.png) +1. 选择一个过滤器,输入您想搜索的关键字。例如,查询包含 `services` 创建信息的审计日志。 2. 点击列表中的任一结果,您便可以查看审计日志的详细信息。 - - ![审计日志详情](/images/docs/zh-cn/toolbox/auditing/auditing-logs/审计日志详情.png) diff --git a/content/zh/docs/toolbox/auditing/auditing-receive-customize.md b/content/zh/docs/toolbox/auditing/auditing-receive-customize.md index 73fc45ba5..9550d7cd0 100644 --- a/content/zh/docs/toolbox/auditing/auditing-receive-customize.md +++ b/content/zh/docs/toolbox/auditing/auditing-receive-customize.md @@ -34,7 +34,7 @@ kubectl edit cm -n kubesphere-system kubesphere-config ## 接收来自 Kubernetes 的审计日志 -要使 KubeSphere 审计日志系统接收来自 Kubernetes 的审计日志,您需要向 `/etc/kubernetes/manifests/kube-apiserver.yaml` 添加 Kubernetes 审计策略文件和 Kubernetes 审计 Webhook 配置文件,如下所示。 +要使 KubeSphere 审计日志系统接收来自 Kubernetes 的审计日志,您需要向 `/etc/kubernetes/manifests/kube-apiserver.yaml` 添加 Kubernetes 审计策略文件和 Kubernetes 审计 Webhook 配置文件。 ### 审计策略 @@ -126,7 +126,7 @@ spec: ``` {{< notice tip >}} -您也可以使用拥有 `platform-admin` 角色的帐户登录控制台,在**集群管理**页面转到**自定义资源 CRD**,搜索 `Webhook`,直接编辑 `kube-auditing-webhook`。 +您也可以使用拥有 `platform-admin` 角色的用户登录控制台,在**集群管理**页面转到 **CRD**,搜索 `Webhook`,直接编辑 `kube-auditing-webhook`。 {{}} 
diff --git a/content/zh/docs/toolbox/auditing/auditing-rule.md b/content/zh/docs/toolbox/auditing/auditing-rule.md index 828dd486f..5d98ee5ac 100644 --- a/content/zh/docs/toolbox/auditing/auditing-rule.md +++ b/content/zh/docs/toolbox/auditing/auditing-rule.md @@ -8,11 +8,7 @@ weight: 15320 审计规则定义了处理审计日志的策略。KubeSphere 审计日志为用户提供两种 CRD 规则(`archiving-rule` 和 `alerting-rule`)以供自定义。 -启用 [KubeSphere 审计日志](../../../pluggable-components/auditing-logs/)后,使用拥有 `platform-admin` 角色的帐户登录控制台。在**集群管理**页面转到**自定义资源 CRD**,在搜索栏中输入 `rules.auditing.kubesphere.io`。点击搜索结果 **Rule**,您便可以看到这两种 CRD 规则,如下所示。 - -![审计 CRD](/images/docs/zh-cn/toolbox/auditing/auditing-rules/auditing-crd.PNG) - -![告警和归档规则](/images/docs/zh-cn/toolbox/auditing/auditing-rules/alerting-archiving-rule.PNG) +启用 [KubeSphere 审计日志](../../../pluggable-components/auditing-logs/)后,使用拥有 `platform-admin` 角色的用户登录控制台。在**集群管理**页面转到 **CRD**,在搜索栏中输入 `rules.auditing.kubesphere.io`。点击搜索结果 **Rule**,您便可以看到这两种 CRD 规则。 下方是部分规则的示例。 @@ -94,7 +90,7 @@ spec: 过滤器 | 描述信息 --- | --- `Workspace` | 发生审计事件的企业空间。 - `Devops` | 发生审计事件的 DevOps 工程。 + `DevOps` | 发生审计事件的 DevOps 项目。 `Level` | 审计日志的级别。 `RequestURI` | RequestURI 是由客户端发送至服务器的请求 URI。 `Verb` | 与该请求相关联的动词。 diff --git a/content/zh/docs/toolbox/events-query.md b/content/zh/docs/toolbox/events-query.md index d3fca2647..e22c29fb7 100644 --- a/content/zh/docs/toolbox/events-query.md +++ b/content/zh/docs/toolbox/events-query.md @@ -10,18 +10,22 @@ Kubernetes 事件系统用于深入了解集群内部发生的事件,KubeSpher 本指南演示了如何进行多层级、细粒度的事件查询,以追踪服务组件的状态。 +## 视频演示 + + + ## 准备工作 需要启用 [KubeSphere 事件系统](../../pluggable-components/events/)。 ## 查询事件 -1. 所有用户都可以使用事件查询功能。使用任意帐户登录控制台,在右下角的 上悬停,然后在弹出菜单中选择**事件查询**。 +1. 所有用户都可以使用事件查询功能。使用任意帐户登录控制台,在右下角的 上悬停,然后在弹出菜单中选择**资源事件查询**。 2. 
在弹出窗口中,您可以看到该帐户有权限查看的事件数量。 - ![event-search](/images/docs/zh-cn/toolbox/event-query/event-search.png) - {{< notice note >}} - 如果您启用了[多集群功能](../../multicluster-management/),KubeSphere 支持对每个集群分别进行事件查询。您可以点击搜索栏左侧的 ,然后选择一个目标集群。 @@ -32,12 +36,8 @@ Kubernetes 事件系统用于深入了解集群内部发生的事件,KubeSpher 3. 您可以点击搜索栏并输入搜索条件,可以按照消息、企业空间、项目、资源类型、资源名称、原因、类别或时间范围搜索事件(例如,输入`时间范围:最近 10 分钟`来搜索最近 10 分钟的事件)。 - ![event-search-list](/images/docs/zh-cn/toolbox/event-query/event-search-list.png) - 4. 点击列表中的任一查询结果,可以查看该结果的原始信息,便于开发者分析和排除故障。 - ![event-details](/images/docs/zh-cn/toolbox/event-query/event-details.png) - {{< notice note >}} 事件查询界面支持每 5 秒、10 秒或 15 秒动态刷新一次。 diff --git a/content/zh/docs/toolbox/log-query.md b/content/zh/docs/toolbox/log-query.md index 93a51b238..9544f88f6 100644 --- a/content/zh/docs/toolbox/log-query.md +++ b/content/zh/docs/toolbox/log-query.md @@ -10,6 +10,12 @@ weight: 15100 本教程演示了如何使用日志查询功能,包括界面、搜索参数和详情页面。 +## 视频演示 + + + ## 准备工作 您需要启用 [KubeSphere 日志系统](../../pluggable-components/logging/)。 @@ -20,19 +26,17 @@ weight: 15100 2. 在弹出窗口中,您可以看到日志数量的时间直方图、集群选择下拉列表以及日志查询栏。 - ![log-search](/images/docs/zh-cn/toolbox/log-query/log-search.png) - {{< notice note >}} +​ {{< notice note >}} - 如果您启用了[多集群功能](../../multicluster-management/),KubeSphere 支持对每个集群分别进行日志查询。您可以点击搜索栏左侧的 切换目标集群。 + - KubeSphere 默认存储最近七天内的日志。 {{}} 3. 您可以点击搜索栏并输入搜索条件,可以按照消息、企业空间、项目、资源类型、资源名称、原因、类别或时间范围搜索事件(例如,输入`时间范围:最近 10 分钟`来搜索最近 10 分钟的事件)。或者,点击时间直方图中的柱状图,KubeSphere 会使用该柱状图的时间范围进行日志查询。 - ![log-search-list](/images/docs/zh-cn/toolbox/log-query/log-search-list.png) - {{< notice note >}} - 关键字字段支持关键字组合查询。例如,您可以同时使用 `Error`、`Fail`、`Fatal`、`Exception` 和 `Warning` 来查询所有异常日志。 @@ -46,12 +50,8 @@ weight: 15100 1. 您可以输入多个条件来缩小搜索结果。 - ![log-search-conditions](/images/docs/zh-cn/toolbox/log-query/log-search-conditions.png) - 2. 
点击列表中的任一结果,进入它的详情页面,查看该容器组 (Pod) 的日志,包括右侧的完整内容,便于开发者分析和排除故障。 - ![log-search-details-page](/images/docs/zh-cn/toolbox/log-query/log-search-details-page.png) - {{< notice note >}} - 日志查询界面支持每 5 秒、10 秒或 15 秒动态刷新一次。 @@ -66,10 +66,4 @@ weight: 15100 在左侧面板,您可以点击 查看 Pod 详情页面或容器详情页面。 -下图是 Pod 详情页面示例: - -![pod-details-page](/images/docs/zh-cn/toolbox/log-query/pod-details-page.png) - -下图是容器详情页面示例。您可以点击右上角的**终端**打开终端为容器排除故障。 - -![container-detail-page](/images/docs/zh-cn/toolbox/log-query/container-detail-page.png) \ No newline at end of file +您可以点击右上角的**终端**打开终端为容器排除故障。 diff --git a/content/zh/docs/toolbox/metering-and-billing/enable-billing.md b/content/zh/docs/toolbox/metering-and-billing/enable-billing.md index 1b2d58cf2..bede16d80 100644 --- a/content/zh/docs/toolbox/metering-and-billing/enable-billing.md +++ b/content/zh/docs/toolbox/metering-and-billing/enable-billing.md @@ -81,5 +81,3 @@ weight: 15420 ``` 4. 在**资源消费统计**页面,您可以看到资源的消费信息。 - - ![计量计费信息](/images/docs/zh-cn/toolbox/metering-and-billing/enable-billing/计量计费信息.png) \ No newline at end of file diff --git a/content/zh/docs/toolbox/metering-and-billing/view-resource-consumption.md b/content/zh/docs/toolbox/metering-and-billing/view-resource-consumption.md index af4baa69e..18ff8b6d5 100644 --- a/content/zh/docs/toolbox/metering-and-billing/view-resource-consumption.md +++ b/content/zh/docs/toolbox/metering-and-billing/view-resource-consumption.md @@ -23,8 +23,6 @@ KubeSphere 计量功能帮助您在不同层级追踪集群或企业空间中的 3. 如果您已经启用[多集群管理](../../../multicluster-management/),则可以在控制面板左侧看到包含 Host 集群和全部 Member 集群的集群列表。如果您未启用该功能,那么列表中只会显示一个 `default` 集群。 - ![cluster-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png) - 在右侧,有三个模块以不同的方式显示资源消费情况。
    @@ -34,26 +32,22 @@ KubeSphere 计量功能帮助您在不同层级追踪集群或企业空间中的 - + - + - +
    描述
    概览资源消费统计 显示自集群创建以来不同资源的消费概览。如果您在 ConfigMap kubesphere-config已经配置资源的价格,则可以看到计费信息。
    截止到昨天的消费历史消费历史 显示截止到昨天的资源消费总况,您也可以自定义时间范围和时间间隔,以查看特定周期内的数据。
    当前包含的资源当前消费 显示过去一小时所选目标对象的资源消费情况。
    - + 4. 在左侧,点击集群名称即可查看集群节点或 Pod 的资源消费详情。 - ![node-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/node-page.png) - - ![pod-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/pod-page.png) - {{< notice note >}} 如需导出 CSV 格式的资源消费统计数据,请勾选左侧的复选框,然后点击 ✓。 @@ -66,12 +60,10 @@ KubeSphere 计量功能帮助您在不同层级追踪集群或企业空间中的 1. 使用 `admin` 用户登录 KubeSphere Web 控制台,点击右下角的 图标,然后选择**资源消费统计**。 -2. 在**企业空间(项目)资源消费情况**一栏,点击**查看消费**。 +2. 在**企业空间资源消费情况**一栏,点击**查看**。 3. 在控制面板左侧,可以看到包含当前集群中全部企业空间的列表。右侧显示所选企业空间的消费详情,其布局与集群消费情况布局类似。 - ![workspace-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png) - {{< notice note >}} 在多集群架构中,如果企业空间中没有分配可用集群,则无法查看企业空间的资源消费情况。有关更多信息,请参阅[集群可见性和授权](../../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)。 @@ -79,7 +71,3 @@ KubeSphere 计量功能帮助您在不同层级追踪集群或企业空间中的 {{}} 4. 在左侧,点击企业空间名称即可查看其项目或工作负载(例如,部署和有状态副本集)的资源消费详情。 - - ![project-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/project-page.png) - - ![deployment-page](/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/deployment-page.png) \ No newline at end of file diff --git a/content/zh/docs/toolbox/web-kubectl.md b/content/zh/docs/toolbox/web-kubectl.md index 2f4c65773..d2f5e4b14 100644 --- a/content/zh/docs/toolbox/web-kubectl.md +++ b/content/zh/docs/toolbox/web-kubectl.md @@ -8,30 +8,22 @@ weight: 15500 Kubectl 是 Kubernetes 命令行工具,您可以用它在 Kubernetes 集群上运行命令。Kubectl 可用于部署应用、查看和管理集群资源、查看日志等。 -KubeSphere 控制台提供 Web Kubectl,方便用户使用。在默认情况下,当前版本中只有被授予 `platform-admin` 角色的帐户(例如默认帐户 `admin`)才有权限使用 Web Kubectl 进行集群资源操作和管理。 +KubeSphere 控制台提供 Web Kubectl,方便用户使用。在默认情况下,当前版本中只有被授予 `platform-admin` 角色的用户(例如默认帐户 `admin`)才有权限使用 Web Kubectl 进行集群资源操作和管理。 本教程演示了如何使用 Web Kubectl 进行集群资源操作和管理。 ## 使用 Web Kubectl -1. 
使用被授予 `platform-admin` 角色的帐户登录 KubeSphere,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择 **Kubectl**。 - - ![web-kubectl-enter](/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-enter.PNG) +1. 使用被授予 `platform-admin` 角色的用户登录 KubeSphere,在右下角的**工具箱**图标上悬停,然后在弹出菜单中选择 **kubectl**。 2. 您可以在弹出窗口中看到 Kubectl 界面,如下图所示。如果您启用了多集群功能,则需要先在右上角的下拉列表中选择目标集群。如果未启用多集群功能,则该下拉列表不可见。 - ![web-kubectl-cluster-select](/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-cluster-select.PNG) - 3. 在命令行工具中输入 Kubectl 命令,查询并管理 Kubernetes 集群资源。例如,执行以下命令查询集群中所有 PVC 的状态。 ```bash kubectl get pvc --all-namespaces ``` - ![Web Kubectl 示例](/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-example.PNG) - - ![web-kubectl-example](/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-example.PNG) - 4. 在终端窗口中使用以下语法运行 Kubectl 命令: ```bash diff --git a/content/zh/docs/upgrade/_index.md b/content/zh/docs/upgrade/_index.md index acd12de70..48693317a 100644 --- a/content/zh/docs/upgrade/_index.md +++ b/content/zh/docs/upgrade/_index.md @@ -11,4 +11,4 @@ icon: "/images/docs/docs.svg" --- -本章演示集群管理员如何将 KubeSphere 升级到 v3.1.1。 \ No newline at end of file +本章演示集群管理员如何将 KubeSphere 升级到 3.2.1。 \ No newline at end of file diff --git a/content/zh/docs/upgrade/air-gapped-upgrade-with-ks-installer.md b/content/zh/docs/upgrade/air-gapped-upgrade-with-ks-installer.md index 9e0bf2a59..101b634a5 100644 --- a/content/zh/docs/upgrade/air-gapped-upgrade-with-ks-installer.md +++ b/content/zh/docs/upgrade/air-gapped-upgrade-with-ks-installer.md @@ -1,6 +1,6 @@ --- title: "使用 ks-installer 离线升级" -keywords: "离线环境, 升级, kubesphere, v3.1.1" +keywords: "离线环境, 升级, kubesphere, 3.2.1" description: "使用 ks-installer 和离线包升级 KubeSphere。" linkTitle: "使用 ks-installer 离线升级" weight: 7500 @@ -11,11 +11,11 @@ weight: 7500 ## 准备工作 -- 您需要有一个运行 KubeSphere v3.0.0 的集群。如果您的 KubeSphere 是 v2.1.1 或更早的版本,请先升级至 v3.0.0。 -- 请仔细阅读 [Release Notes for 3.1.1](../../release/release-v311/)。 +- 您需要有一个运行 KubeSphere v3.1.x 的集群。如果您的 KubeSphere 是 v3.0.0 或更早的版本,请先升级至 v3.1.x。 +- 请仔细阅读 [3.2.1 
版本说明](../../release/release-v321/)。 - 提前备份所有重要的组件。 - Docker 仓库。您需要有一个 Harbor 或其他 Docker 仓库。有关更多信息,请参见[准备一个私有镜像仓库](../../installing-on-linux/introduction/air-gapped-installation/#步骤-2准备一个私有镜像仓库)。 -- KubeSphere v3.1.1 支持的 Kubernetes 版本:v1.17.x、v1.18.x、v1.19.x 和 v1.20.x。 +- KubeSphere 3.2.1 支持的 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 和 v1.22.x(实验性支持)。 ## 步骤 1:准备安装镜像 @@ -24,19 +24,19 @@ weight: 7500 1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} - 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/#kubesphere-v310-镜像清单)。 + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/#kubesphere-v321-镜像清单)。 {{}} 2. 下载 `offline-installation-tool.sh`。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. 使 `.sh` 文件可执行。 @@ -96,10 +96,10 @@ weight: 7500 1. 执行以下命令下载 ks-installer,并将其传输至您充当任务机的机器,用于安装。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` -2. 验证您已在 `cluster-configuration.yaml` 中的 `spec.local_registry` 字段指定了私有镜像仓库地址。请注意,如果您的已有集群通过离线安装方式搭建,您应该已配置了此地址。如果您的集群采用在线安装方式搭建而需要进行离线升级,执行以下命令编辑您已有 KubeSphere v3.0.0 集群的 `cluster-configuration.yaml` 文件,并添加私有镜像仓库地址: +2. 
验证您已在 `cluster-configuration.yaml` 中的 `spec.local_registry` 字段指定了私有镜像仓库地址。请注意,如果您的已有集群通过离线安装方式搭建,您应该已配置了此地址。如果您的集群采用在线安装方式搭建而需要进行离线升级,执行以下命令编辑您已有 KubeSphere v3.1.x 集群的 `cluster-configuration.yaml` 文件,并添加私有镜像仓库地址: ```bash kubectl edit cc -n kubesphere-system @@ -169,5 +169,3 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx 要访问控制台,请确保在您的安全组中打开端口 30880。 {{}} - -![KubeSphere 登录](/images/docs/zh-cn/upgrade/air-gapped-upgrade-with-ks-installer/kubesphere-login.PNG) \ No newline at end of file diff --git a/content/zh/docs/upgrade/air-gapped-upgrade-with-kubekey.md b/content/zh/docs/upgrade/air-gapped-upgrade-with-kubekey.md index 8c15e35c6..a4706f74c 100644 --- a/content/zh/docs/upgrade/air-gapped-upgrade-with-kubekey.md +++ b/content/zh/docs/upgrade/air-gapped-upgrade-with-kubekey.md @@ -1,6 +1,6 @@ --- title: "使用 KubeKey 离线升级" -keywords: "离线环境, kubernetes, 升级, kubesphere, v3.1.1" +keywords: "离线环境, kubernetes, 升级, kubesphere, 3.2.1" description: "使用离线包升级 Kubernetes 和 KubeSphere。" linkTitle: "使用 KubeKey 离线升级" weight: 7400 @@ -9,8 +9,9 @@ weight: 7400 ## 准备工作 -- 您需要有一个运行 KubeSphere v3.0.0 的集群。如果您的 KubeSphere 是 v2.1.1 或更早的版本,请先升级至 v3.0.0。 -- 请仔细阅读 [Release Notes for 3.1.1](../../release/release-v311/)。 +- 您需要有一个运行 KubeSphere v3.1.x 的集群。如果您的 KubeSphere 是 v3.0.0 或更早的版本,请先升级至 v3.1.x。 +- 您的 Kubernetes 版本必须为 v1.19.x 或更新版本。 +- 请仔细阅读 [3.2.1 版本说明](../../release/release-v321/)。 - 提前备份所有重要的组件。 - Docker 仓库。您需要有一个 Harbor 或其他 Docker 仓库。有关更多信息,请参见[准备一个私有镜像仓库](../../installing-on-linux/introduction/air-gapped-installation/#步骤-2准备一个私有镜像仓库)。 - 请确保每个节点都可以从该 Docker 仓库拉取镜像或向其推送镜像。 @@ -46,7 +47,7 @@ weight: 7400 ### 步骤 1:下载 KubeKey -与在 Linux 上在线安装 KubeSphere 相似,您需要事先[下载 KubeKey v1.1.0](https://github.com/kubesphere/kubekey/releases)。下载 `tar.gz` 文件,将它传输到充当任务机的本地机器上进行安装。解压文件后,执行以下命令,使 `kk` 可执行: +与在 Linux 上在线安装 KubeSphere 相似,您需要事先[下载 KubeKey v1.2.1](https://github.com/kubesphere/kubekey/releases)。下载 `tar.gz` 文件,将它传输到充当任务机的本地机器上进行安装。解压文件后,执行以下命令,使 `kk` 可执行: ```bash chmod +x kk @@ -59,19 +60,19 @@ chmod 
+x kk 1. 使用以下命令从能够访问互联网的机器上下载镜像清单文件 `images-list.txt`: ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/images-list.txt + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/images-list.txt ``` {{< notice note >}} - 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../installing-on-linux/introduction/air-gapped-installation/#kubesphere-v310-镜像清单)。 + 该文件根据不同的模块列出了 `##+modulename` 下的镜像。您可以按照相同的规则把自己的镜像添加到这个文件中。要查看完整文件,请参见[附录](../../installing-on-linux/introduction/air-gapped-installation/#kubesphere-v321-镜像清单)。 {{}} 2. 下载 `offline-installation-tool.sh`。 ```bash - curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/offline-installation-tool.sh + curl -L -O https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/offline-installation-tool.sh ``` 3. 使 `.sh` 文件可执行。 @@ -101,18 +102,18 @@ chmod +x kk 5. 下载 Kubernetes 二进制文件。 ```bash - ./offline-installation-tool.sh -b -v v1.17.9 + ./offline-installation-tool.sh -b -v v1.21.5 ``` 如果您无法访问 Google 的对象存储服务,请运行以下命令添加环境变量以变更来源。 ```bash - export KKZONE=cn;./offline-installation-tool.sh -b -v v1.17.9 + export KKZONE=cn;./offline-installation-tool.sh -b -v v1.21.5 ``` {{< notice note >}} - - 您可以根据自己的需求变更下载的 Kubernetes 版本。安装 KubeSphere v3.1.1 的建议 Kubernetes 版本:v1.17.9,v1.18.8,v1.19.8 以及 v1.20.4。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.19.8。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 + - 您可以根据自己的需求变更下载的 Kubernetes 版本。安装 KubeSphere 3.2.1 的建议 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 以及 v1.22.x(实验性支持)。如果不指定 Kubernetes 版本,KubeKey 将默认安装 Kubernetes v1.21.5。有关受支持的 Kubernetes 版本的更多信息,请参见[支持矩阵](../../installing-on-linux/introduction/kubekey/#支持矩阵)。 - 您可以通过下载 Kubernetes v1.17.9 二进制文件将 Kubernetes 从 v1.16.13 升级到 v1.17.9。但对于跨多个版本升级,需要事先下载所有中间版本,例如您想将 Kubernetes 从 v1.15.12 升级到 v1.18.6,则需要下载 Kubernetes v1.16.13、v1.17.9 和 v1.18.6 二进制文件。 @@ -158,8 +159,8 @@ chmod +x kk | 
| Kubernetes | KubeSphere | | ------ | ---------- | ---------- | -| 升级前 | v1.16.13 | v3.0.0 | -| 升级后 | v1.17.9 | v3.1.1 | +| 升级前 | v1.18.6 | v3.1.x | +| 升级后 | v1.21.5 | 3.2.1 | #### 升级集群 @@ -176,7 +177,7 @@ chmod +x kk 例如: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -187,7 +188,7 @@ chmod +x kk #### 编辑配置文件 -编辑该配置文件 `config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +编辑该配置文件 `config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 {{< notice warning >}} @@ -217,7 +218,7 @@ chmod +x kk privateRegistry: dockerhub.kubekey.local ``` -#### 将单节点集群升级至 KubeSphere v3.1.1 和 Kubernetes v1.17.9 +#### 将单节点集群升级至 KubeSphere 3.2.1 和 Kubernetes v1.21.5 ```bash ./kk upgrade -f config-sample.yaml @@ -225,10 +226,10 @@ chmod +x kk 要将 Kubernetes 升级至特定版本,可以在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x(实验性支持) ### 离线升级多节点集群 @@ -246,8 +247,8 @@ chmod +x kk | | Kubernetes | KubeSphere | | ------ | ---------- | ---------- | -| 升级前 | v1.16.13 | v3.0.0 | -| 升级后 | v1.17.9 | v3.1.1 | +| 升级前 | v1.18.6 | v3.1.x | +| 升级后 | v1.21.5 | 3.2.1 | #### 升级集群 @@ -264,7 +265,7 @@ chmod +x kk 例如: ```bash -./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.1.1 -f config-sample.yaml +./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f config-sample.yaml ``` {{< notice note >}} @@ -275,7 +276,7 @@ chmod +x kk #### 编辑配置文件 -编辑该配置文件 `config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)。 +编辑该配置文件 
`config-sample.yaml`。请查看[供您参考的示例](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)。 {{< notice warning >}} @@ -307,7 +308,7 @@ chmod +x kk privateRegistry: dockerhub.kubekey.local ``` -#### 将多节点集群升级至 KubeSphere v3.1.1 和 Kubernetes v1.17.9 +#### 将多节点集群升级至 KubeSphere 3.2.1 和 Kubernetes v1.21.5 ```bash ./kk upgrade -f config-sample.yaml @@ -315,8 +316,8 @@ chmod +x kk 要将 Kubernetes 升级至特定版本,可以在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x(实验性支持) diff --git a/content/zh/docs/upgrade/overview.md b/content/zh/docs/upgrade/overview.md index 42ebecf91..4ac9ee94c 100644 --- a/content/zh/docs/upgrade/overview.md +++ b/content/zh/docs/upgrade/overview.md @@ -1,6 +1,6 @@ --- title: "概述" -keywords: "Kubernetes, 升级, KubeSphere, v3.1.1, 升级" +keywords: "Kubernetes, 升级, KubeSphere, 3.2.1, 升级" description: "了解升级之前需要注意的事项,例如版本和升级工具。" linkTitle: "概述" weight: 7100 @@ -8,20 +8,18 @@ weight: 7100 ## 确定您的升级方案 -KubeSphere v3.1.1 与 Kubernetes 1.17.x、1.18.x、1.19.x 以及 1.20.x 兼容: +KubeSphere 3.2.1 与 Kubernetes 1.19.x、1.20.x、1.21.x 以及 1.22.x(实验性支持)兼容: -- 在您升级集群至 KubeSphere v3.1.1 之前,您的 KubeSphere 集群版本必须为 v3.0.0。 +- 在您升级集群至 KubeSphere 3.2.1 之前,您的 KubeSphere 集群版本必须为 v3.1.x。 -- 如果您的现有 KubeSphere v3.0.0 集群安装在 Kubernetes 1.17.x+ 上,您可选择只将 KubeSphere 升级到 v3.1.1 或者同时升级 Kubernetes(到更高版本)和 KubeSphere(到 v3.1.1)。 - -- 如果您的现有 KubeSphere v3.0.0 集群安装在 Kubernetes 1.16.x 或更早版本上,您必须同时升级 Kubernetes(到 1.17.x+)和 KubeSphere(到 v3.1.1)。 +- 如果您的现有 KubeSphere v3.1.x 集群安装在 Kubernetes 1.19.x+ 上,您可选择只将 KubeSphere 升级到 3.2.1 或者同时升级 Kubernetes(到更高版本)和 KubeSphere(到 3.2.1)。 ## 升级前 {{< notice warning >}} - 您应该先在测试环境中实施升级模拟。在测试环境中成功升级并且所有应用程序都正常运行之后,再在生产环境中升级您的集群。 -- 在升级过程中,应用程序可能会短暂中断(尤其是单副本 Pod),请安排合理的升级时间。 +- 在升级过程中,应用程序可能会短暂中断(尤其是单副本容器组),请安排合理的升级时间。 - 建议在生产环境中升级之前备份 etcd 和有状态应用程序。您可以使用 
[Velero](https://velero.io/) 来备份和迁移 Kubernetes 资源以及持久化存储卷。 {{}} diff --git a/content/zh/docs/upgrade/upgrade-with-ks-installer.md b/content/zh/docs/upgrade/upgrade-with-ks-installer.md index b17efc216..11af9bb0e 100644 --- a/content/zh/docs/upgrade/upgrade-with-ks-installer.md +++ b/content/zh/docs/upgrade/upgrade-with-ks-installer.md @@ -1,6 +1,6 @@ --- title: "使用 ks-installer 升级" -keywords: "kubernetes, 升级, kubesphere, v3.1.1" +keywords: "kubernetes, 升级, kubesphere, 3.2.1" description: "使用 ks-installer 升级 KubeSphere。" linkTitle: "使用 ks-installer 升级" weight: 7300 @@ -10,19 +10,19 @@ weight: 7300 ## 准备工作 -- 您需要有一个运行 KubeSphere v3.0.0 的集群。如果您的 KubeSphere 是 v2.1.1 或更早的版本,请先升级至 v3.0.0。 -- 请仔细阅读 [Release Notes for 3.1.1](../../release/release-v311/)。 +- 您需要有一个运行 KubeSphere v3.1.x 的集群。如果您的 KubeSphere 是 v3.0.0 或更早的版本,请先升级至 v3.1.x。 +- 请仔细阅读 [3.2.1 版本说明](../../release/release-v321/)。 - 提前备份所有重要的组件。 -- KubeSphere v3.1.1 支持的 Kubernetes 版本:v1.17.x、v1.18.x、v1.19.x 和 v1.20.x。 +- KubeSphere 3.2.1 支持的 Kubernetes 版本:v1.19.x、v1.20.x、v1.21.x 和 v1.22.x(实验性支持)。 ## 应用 ks-installer 运行以下命令升级集群: ```bash -kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml +kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml ``` ## 启用可插拔组件 -您可以在升级后启用 KubeSphere v3.1.1 的[可插拔组件](../../pluggable-components/overview/)以体验该容器平台的更多功能。 \ No newline at end of file +您可以在升级后启用 KubeSphere 3.2.1 的[可插拔组件](../../pluggable-components/overview/)以体验该容器平台的更多功能。 \ No newline at end of file diff --git a/content/zh/docs/upgrade/upgrade-with-kubekey.md b/content/zh/docs/upgrade/upgrade-with-kubekey.md index 49abeeb9b..10ec53b3e 100644 --- a/content/zh/docs/upgrade/upgrade-with-kubekey.md +++ b/content/zh/docs/upgrade/upgrade-with-kubekey.md @@ -1,6 +1,6 @@ --- title: "使用 KubeKey 升级" -keywords: "Kubernetes, 升级, KubeSphere, v3.1.1, KubeKey" +keywords: "Kubernetes, 升级, KubeSphere, 3.2.1, KubeKey" description: "使用 
KubeKey 升级 Kubernetes 和 KubeSphere。" linkTitle: "使用 KubeKey 升级" weight: 7200 @@ -13,8 +13,8 @@ weight: 7200 ## 准备工作 -- 您需要有一个运行 KubeSphere v3.0.0 的集群。如果您的 KubeSphere 是 v2.1.1 或更早的版本,请先升级至 v3.0.0。 -- 请仔细阅读 [Release Notes for 3.1.1](../../release/release-v311/)。 +- 您需要有一个运行 KubeSphere v3.1.x 的集群。如果您的 KubeSphere 是 v3.0.0 或更早的版本,请先升级至 v3.1.x。 +- 请仔细阅读 [3.2.1 版本说明](../../release/release-v321/)。 - 提前备份所有重要的组件。 - 确定您的升级方案。本文档中提供 [All-in-One 集群](#all-in-one-集群)和[多节点集群](#多节点集群)的两种升级场景。 @@ -29,7 +29,7 @@ weight: 7200 从 [GitHub 发布页面](https://github.com/kubesphere/kubekey/releases)下载 KubeKey 或直接使用以下命令。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{}} @@ -45,7 +45,7 @@ export KKZONE=cn 执行以下命令下载 KubeKey。 ```bash -curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - +curl -sfL https://get-kk.kubesphere.io | VERSION=v1.2.1 sh - ``` {{< notice note >}} @@ -60,7 +60,7 @@ curl -sfL https://get-kk.kubesphere.io | VERSION=v1.1.1 sh - {{< notice note >}} -执行以上命令会下载最新版 KubeKey (v1.1.1),您可以修改命令中的版本号以下载指定版本。 +执行以上命令会下载最新版 KubeKey (v1.2.1),您可以修改命令中的版本号以下载指定版本。 {{}} @@ -81,18 +81,18 @@ chmod +x kk ### All-in-One 集群 -运行以下命令使用 KubeKey 将您的单节点集群升级至 KubeSphere v3.1.1 和 Kubernetes v1.20.4: +运行以下命令使用 KubeKey 将您的单节点集群升级至 KubeSphere 3.2.1 和 Kubernetes v1.21.5: ```bash -./kk upgrade --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 +./kk upgrade --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 ``` 要将 Kubernetes 升级至特定版本,请在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x(实验性支持) ### 多节点集群 @@ -121,27 +121,27 @@ chmod +x kk {{< notice note >}} -有关更多信息,请参见[编辑配置文件](../../installing-on-linux/introduction/multioverview/#2-编辑配置文件),或参考[完整配置文件](https://github.com/kubesphere/kubekey/blob/release-1.1/docs/config-example.md)中的 
`Cluster` 部分获取更多信息。 +有关更多信息,请参见[编辑配置文件](../../installing-on-linux/introduction/multioverview/#2-编辑配置文件),或参考[完整配置文件](https://github.com/kubesphere/kubekey/blob/release-1.2/docs/config-example.md)中的 `Cluster` 部分获取更多信息。 {{}} #### 步骤 3:升级集群 -运行以下命令,将您的集群升级至 KubeSphere v3.1.1 和 Kubernetes v1.20.4: +运行以下命令,将您的集群升级至 KubeSphere 3.2.1 和 Kubernetes v1.21.5: ```bash -./kk upgrade --with-kubernetes v1.20.4 --with-kubesphere v3.1.1 -f sample.yaml +./kk upgrade --with-kubernetes v1.21.5 --with-kubesphere v3.2.1 -f sample.yaml ``` 要将 Kubernetes 升级至特定版本,请在 `--with-kubernetes` 标志后明确指定版本号。以下是可用版本: -- v1.17.0, v1.17.4, v1.17.5, v1.17.6, v1.17.7, v1.17.8, v1.17.9 -- v1.18.3, v1.18.5, v1.18.6, v1.18.8 -- v1.19.0, v1.19.8, v1.19.9 -- v1.20.4, v1.20.6 +- v1.19.x +- v1.20.x +- v1.21.x +- v1.22.x(实验性支持) {{< notice note >}} -若要使用 KubeSphere v3.1.1 的部分新功能,您需要在升级后启用对应的可插拔组件。 +若要使用 KubeSphere 3.2.1 的部分新功能,您需要在升级后启用对应的可插拔组件。 {{}} \ No newline at end of file diff --git a/content/zh/docs/upgrade/what-changed.md b/content/zh/docs/upgrade/what-changed.md index 3dcd3eb0b..453550608 100644 --- a/content/zh/docs/upgrade/what-changed.md +++ b/content/zh/docs/upgrade/what-changed.md @@ -1,13 +1,13 @@ --- title: "升级后的变更" -keywords: "Kubernetes, 升级, KubeSphere, v3.1.1" +keywords: "Kubernetes, 升级, KubeSphere, 3.2.1" description: "了解升级后的变更。" linkTitle: "升级后的变更" weight: 7600 --- -本文说明先前版本现有设置在升级后的变更。如果您想了解 KubeSphere 3.1.1 的所有新功能和优化,请直接参阅 [v3.1.1 发布说明](../../release/release-v311/)。 +本文说明先前版本现有设置在升级后的变更。如果您想了解 KubeSphere 3.2.1 的所有新功能和优化,请直接参阅 [3.2.1 发布说明](../../release/release-v321/)。 ## 访问控制 diff --git a/content/zh/docs/workspace-administration/app-repository/import-helm-repository.md b/content/zh/docs/workspace-administration/app-repository/import-helm-repository.md index 85a158917..3fcca00b5 100644 --- a/content/zh/docs/workspace-administration/app-repository/import-helm-repository.md +++ b/content/zh/docs/workspace-administration/app-repository/import-helm-repository.md @@ -17,19 +17,15 @@ KubeSphere 
构建的应用仓库可以让用户使用基于 Helm Chart 的 Kuber - 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../../pluggable-components/app-store/)。 - 您需要准备一个应用仓库。请参考 [Helm 官方文档](https://v2.helm.sh/docs/developing_charts/#the-chart-repository-guide)创建仓库,或者[上传自己的应用至 KubeSphere 公共仓库](../../../workspace-administration/app-repository/upload-app-to-public-repository/)。此外,也可以使用下方步骤中的示例仓库,这里仅用作演示。 -- 您需要创建一个企业空间和一个用户帐户 (`ws-admin`)。该帐户必须在企业空间中被授予 `workspace-admin` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间和一个用户 (`ws-admin`)。该用户必须在企业空间中被授予 `workspace-admin` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../../quick-start/create-workspace-and-project/)。 ## 添加应用仓库 -1. 以 `ws-admin` 身份登录 KubeSphere Web 控制台。在企业空间页面,转到**应用管理**下的**应用仓库**,然后点击**添加仓库**。 - - ![添加仓库](/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-1.png) +1. 以 `ws-admin` 身份登录 KubeSphere Web 控制台。在企业空间页面,转到**应用管理**下的**应用仓库**,然后点击**添加**。 2. 在弹出的对话框中,输入应用仓库名称并添加仓库 URL。例如,输入 `https://charts.kubesphere.io/main`。 - ![应用信息对话框](/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-info-dialogue-2.png) - - - **应用仓库名称**:为仓库设置一个简洁明了的名称,方便用户识别。 + - **名称**:为仓库设置一个简洁明了的名称,方便用户识别。 - **URL**:遵循 RFC 3986 规范并支持以下三种协议: - S3:S3 格式的 URL,例如 `s3..amazonaws.com`,用于访问使用 S3 接口的 Amazon S3 服务。如果您选择此类型,则需要提供 Access Key ID 和 Secret Access Key。 @@ -42,11 +38,11 @@ KubeSphere 构建的应用仓库可以让用户使用基于 Helm Chart 的 Kuber 如果您想要对 HTTP/HTTPS 进行基本访问验证,可以使用类似此格式的 URL:`http://username:password@docs-repo.gd2.qingstor.com`。 {{}} - - **描述信息**:简单介绍应用仓库的主要特性。 + + - **同步周期**:同步远端应用仓库的周期。 + - **描述**:简单介绍应用仓库的主要特性。 3. 输入必需的字段后,点击**验证**以验证 URL。如果 URL 可用,您会在它旁边看到一个绿色的对号,点击**确定**完成操作。 - - ![验证 URL](/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/validate-link-3.png) {{< notice note >}} @@ -55,6 +51,4 @@ KubeSphere 构建的应用仓库可以让用户使用基于 Helm Chart 的 Kuber {{}} -4. 
导入完成后,仓库会列在下方的仓库列表中,并且 KubeSphere 会自动加载该仓库中的所有应用,并添加为应用模板。当用户使用应用模板来部署应用时,可以在该仓库中查看这些应用。有关更多信息,请参见[用应用模板部署应用](../../../project-user-guide/application/deploy-app-from-template/)。 - - ![应用仓库列表](/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-list-4.png) \ No newline at end of file +4. 导入完成后,仓库会列在下方的仓库列表中,并且 KubeSphere 会自动加载该仓库中的所有应用,并添加为应用模板。当用户使用应用模板来部署应用时,可以在该仓库中查看这些应用。有关更多信息,请参见[用应用模板部署应用](../../../project-user-guide/application/deploy-app-from-template/)。 \ No newline at end of file diff --git a/content/zh/docs/workspace-administration/department-management.md b/content/zh/docs/workspace-administration/department-management.md index 178f6d1f7..59ee6216f 100644 --- a/content/zh/docs/workspace-administration/department-management.md +++ b/content/zh/docs/workspace-administration/department-management.md @@ -1,67 +1,66 @@ --- -title: "企业组织" +title: "部门管理" keywords: 'KubeSphere, Kubernetes, 部门, 角色, 权限, 用户组' description: '在企业空间中创建部门,将用户分配到不同部门中并授予权限。' -linkTitle: "企业组织" +linkTitle: "部门管理" weight: 9800 --- 本文档介绍如何管理企业空间中的部门。 -企业空间中的部门是用来管理权限的逻辑单元。您可以在部门中设置企业空间角色、多个项目角色以及多个 DevOps 工程角色,还可以将用户分配到部门中以批量管理用户权限。 +企业空间中的部门是用来管理权限的逻辑单元。您可以在部门中设置企业空间角色、多个项目角色以及多个 DevOps 项目角色,还可以将用户分配到部门中以批量管理用户权限。 ## 准备工作 -- 您需要[创建一个企业空间和一个帐户](../../quick-start/create-workspace-and-project/),该帐户需在该企业空间中具有 `workspace-admin` 角色。本文档以 `demo-ws` 企业空间和 `ws-admin` 帐户为例。 -- 如需在一个部门中设置项目角色或者 DevOps 工程角色,则需要在该企业空间中[创建至少一个项目或一个 DevOps 工程](../../quick-start/create-workspace-and-project/)。 +- 您需要[创建一个企业空间和一个用户](../../quick-start/create-workspace-and-project/),该用户需在该企业空间中具有 `workspace-admin` 角色。本文档以 `demo-ws` 企业空间和 `ws-admin` 用户为例。 +- 如需在一个部门中设置项目角色或者 DevOps 项目角色,则需要在该企业空间中[创建至少一个项目或一个 DevOps 项目](../../quick-start/create-workspace-and-project/)。 ## 创建部门 1. 以 `ws-admin` 用户登录 KubeSphere Web 控制台并进入 `demo-ws` 企业空间。 -2. 在左侧导航栏选择**企业空间设置**下的**企业组织**,点击右侧的**维护组织结构**。 +2. 在左侧导航栏选择**企业空间设置**下的**部门管理**,点击右侧的**设置部门**。 -3. 
在**维护组织结构**对话框中,设置以下字段,然后点击**确定**创建部门。 +3. 在**设置部门**对话框中,设置以下参数,然后点击**确定**创建部门。 {{< notice note >}} - * 如果企业空间中已经创建过部门,您可以点击**添加新的子部门**为该企业空间添加更多部门。 - - * 您可以在每个部门中创建多个部门和多个子部门。如需创建子部门,在左侧部门树中选择一个部门,然后点击右侧的**添加新的子部门**。 + * 如果企业空间中已经创建过部门,您可以点击**创建部门**为该企业空间添加更多部门。 + * 您可以在每个部门中创建多个部门和多个子部门。如需创建子部门,在左侧部门树中选择一个部门,然后点击右侧的**创建部门**。 {{}} - * **部门名称**:为部门设置一个名称。 + * **名称**:为部门设置一个名称。 * **别名**:为部门设置一个别名。 * **企业空间角色**:当前企业空间中所有部门成员的角色。 - * **绑定项目角色**:一个项目中所有部门成员的角色。您可以点击**添加项目**来指定多个项目角色。每个项目只能指定一个角色。 - * **绑定 DevOps 工程角色**:一个 DevOps 工程中所有部门成员的角色。您可以点击**添加 DevOps 工程**来指定多个 DevOps 工程角色。每个 DevOps 工程只能指定一个角色。 + * **项目角色**:一个项目中所有部门成员的角色。您可以点击**添加项目**来指定多个项目角色。每个项目只能指定一个角色。 + * **DevOps 项目角色**:一个 DevOps 项目中所有部门成员的角色。您可以点击**添加 DevOps 项目**来指定多个 DevOps 项目角色。每个 DevOps 项目只能指定一个角色。 -4. 部门创建完成后,点击**关闭**。在**企业组织**页面,可以在左侧的部门树中看到已创建的部门。 +4. 部门创建完成后,点击**确定**,然后点击**关闭**。在**部门管理**页面,可以在左侧的部门树中看到已创建的部门。 ## 分配用户至部门 -1. 在**企业组织**页面,选择左侧部门树中的一个部门,点击右侧的**未分配**。 +1. 在**部门管理**页面,选择左侧部门树中的一个部门,点击右侧的**未分配**。 -2. 在未分配用户列表中,点击用户右侧的 ,对出现的提示消息点击**确定**,以将用户分配到该部门。 +2. 在用户列表中,点击用户右侧的 ,对出现的提示消息点击**确定**,以将用户分配到该部门。 {{< notice note >}} * 如果部门提供的权限与用户的现有权限重叠,则会为用户添加新的权限。用户的现有权限不受影响。 - * 分配到某个部门的用户可以根据与该部门关联的企业空间角色、项目角色和 DevOps 工程角色来执行操作,而无需被邀请到企业空间、项目和 DevOps 工程中。 + * 分配到某个部门的用户可以根据与该部门关联的企业空间角色、项目角色和 DevOps 项目角色来执行操作,而无需被邀请到企业空间、项目和 DevOps 项目中。 {{}} ## 从部门中移除用户 -1. 在**企业组织**页面,选择左侧部门树中的一个部门,然后点击右侧的**已分配**。 +1. 在**部门管理**页面,选择左侧部门树中的一个部门,然后点击右侧的**已分配**。 2. 在已分配用户列表中,点击用户右侧的 ,在出现的对话框中输入相应的用户名,然后点击**确定**来移除用户。 ## 删除和编辑部门 -1. 在**企业组织**页面,点击**维护组织结构**。 +1. 在**部门管理**页面,点击**设置部门**。 -2. 在**维护组织结构**对话框的左侧,点击需要编辑或删除部门的上级部门。 +2. 在**设置部门**对话框的左侧,点击需要编辑或删除部门的上级部门。 3. 
点击部门右侧的 进行编辑。 diff --git a/content/zh/docs/workspace-administration/project-quotas.md b/content/zh/docs/workspace-administration/project-quotas.md index 353de54df..91d6b9bb1 100644 --- a/content/zh/docs/workspace-administration/project-quotas.md +++ b/content/zh/docs/workspace-administration/project-quotas.md @@ -6,51 +6,45 @@ linkTitle: "项目配额" weight: 9600 --- -KubeSphere 使用请求 (Request) 和限制 (Limit) 来控制项目中的资源(例如 CPU 和内存)使用情况,在 Kubernetes 中也称为 [ResourceQuota](https://kubernetes.io/zh/docs/concepts/policy/resource-quotas/)。请求确保项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反地,限制确保项目不能使用超过特定值的资源。 +KubeSphere 使用预留(Request)和限制(Limit)来控制项目中的资源(例如 CPU 和内存)使用情况,在 Kubernetes 中也称为[资源配额](https://kubernetes.io/zh/docs/concepts/policy/resource-quotas/)。请求确保项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反地,限制确保项目不能使用超过特定值的资源。 -除了 CPU 和内存,您还可以单独为其他对象设置资源配额,例如项目中的 Pod、[部署](../../project-user-guide/application-workloads/deployments/)、[任务](../../project-user-guide/application-workloads/jobs/)、[服务](../../project-user-guide/application-workloads/services/)和 [ConfigMap](../../project-user-guide/configuration/configmaps/)。 +除了 CPU 和内存,您还可以单独为其他对象设置资源配额,例如项目中的容器组、[部署](../../project-user-guide/application-workloads/deployments/)、[任务](../../project-user-guide/application-workloads/jobs/)、[服务](../../project-user-guide/application-workloads/services/)和[配置字典](../../project-user-guide/configuration/configmaps/)。 本教程演示如何配置项目配额。 ## 准备工作 -您需要有一个可用的企业空间、一个项目和一个帐户 (`ws-admin`)。该帐户必须在企业空间层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +您需要有一个可用的企业空间、一个项目和一个用户 (`ws-admin`)。该用户必须在企业空间层级拥有 `admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 {{< notice note >}} -如果使用 `project-admin` 帐户(该帐户在项目层级拥有 `admin` 角色),您也可以为新项目(即其配额尚未设置)设置项目配额。不过,项目配额设置完成之后,`project-admin` 无法更改配额。一般情况下,`ws-admin` 负责为项目设置限制和请求。`project-admin` 负责为项目中的容器[设置限制范围](../../project-administration/container-limit-ranges/)。 +如果使用 `project-admin` 用户(该用户在项目层级拥有 `admin` 
角色),您也可以为新项目(即其配额尚未设置)设置项目配额。不过,项目配额设置完成之后,`project-admin` 无法更改配额。一般情况下,`ws-admin` 负责为项目设置限制和请求。`project-admin` 负责为项目中的容器[设置限制范围](../../project-administration/container-limit-ranges/)。 {{}} ## 设置项目配额 -1. 以 `ws-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您可以在**概览**页面看到项目配额尚未设置。点击**设置**来配置配额。 - - ![项目配额](/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/project-quotas.PNG) +1. 以 `ws-admin` 身份登录控制台,进入一个项目。如果该项目是新创建的项目,您可以在**概览**页面看到项目配额尚未设置。点击**编辑配额**来配置配额。 2. 在弹出对话框中,您可以看到 KubeSphere 默认不为项目设置任何请求或限制。要设置请求和限制来控制 CPU 和内存资源,请将滑块移动到期望的值或者直接输入数字。字段留空意味着您不设置任何请求或限制。 - ![设置项目配额](/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-project-quotas.PNG) - {{< notice note >}} 限制必须大于请求。 {{}} -3. 要为其他资源设置配额,请点击**添加配额项**,从列表中选择一个对象。 - - ![设置其他资源配额](/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-other-resource-quotas.PNG) +3. 要为其他资源设置配额,在**项目资源配额**下点击**添加**,选择一个资源或输入资源名称并设置配额。 4. 点击**确定**完成配额设置。 5. 在**项目设置**下的**基本信息**页面,您可以查看该项目的所有资源配额。 -6. 要更改项目配额,请在**基本信息**页面点击**项目管理**,然后选择**编辑配额**。 +6. 
要更改项目配额,请在**基本信息**页面点击**编辑项目**,然后选择**编辑项目配额**。 {{< notice note >}} - 对于[多集群项目](../../project-administration/project-and-multicluster-project/#多集群项目),**项目管理**下拉菜单中不会显示**编辑配额**选项。若要为多集群项目设置配额,前往**项目设置**下的**配额管理**,并点击**编辑配额**。请注意,由于多集群项目跨集群运行,您可以为多集群项目针对不同集群分别设置资源配额。 + 对于[多集群项目](../../project-administration/project-and-multicluster-project/#多集群项目),**管理项目**下拉菜单中不会显示**编辑配额**选项。若要为多集群项目设置配额,前往**项目设置**下的**项目配额**,并点击**编辑配额**。请注意,由于多集群项目跨集群运行,您可以为多集群项目针对不同集群分别设置资源配额。 {{}} diff --git a/content/zh/docs/workspace-administration/role-and-member-management.md b/content/zh/docs/workspace-administration/role-and-member-management.md index 6a039eace..cecb78868 100644 --- a/content/zh/docs/workspace-administration/role-and-member-management.md +++ b/content/zh/docs/workspace-administration/role-and-member-management.md @@ -1,22 +1,16 @@ --- title: "企业空间角色和成员管理" -keywords: "Kubernetes, workspace, KubeSphere, 多租户" +keywords: "Kubernetes, 企业空间, KubeSphere, 多租户" description: "自定义企业空间角色并将角色授予用户。" linkTitle: "企业空间角色和成员管理" weight: 9400 --- -本教程演示如何在企业空间中管理角色和成员。在企业空间级别,您可以向角色授予以下模块中的权限: - -- **项目管理** -- **DevOps 工程管理** -- **应用管理** -- **访问控制** -- **企业空间设置** +本教程演示如何在企业空间中管理角色和成员。 ## 准备工作 -至少已创建一个企业空间,例如 `demo-workspace`。您还需要准备一个帐户(如 `ws-admin`),该帐户在企业空间级别具有 `workspace-admin` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +至少已创建一个企业空间,例如 `demo-workspace`。您还需要准备一个用户(如 `ws-admin`),该用户在企业空间级别具有 `workspace-admin` 角色。有关更多信息,请参见[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 {{< notice note >}} @@ -26,30 +20,28 @@ weight: 9400 ## 内置角色 -**企业角色**页面列出了以下四个可用的内置角色。创建企业空间时,KubeSphere 会自动创建内置角色,并且内置角色无法进行编辑或删除。您只能查看内置角色的权限或将其分配给用户。 +**企业空间角色**页面列出了以下四个可用的内置角色。创建企业空间时,KubeSphere 会自动创建内置角色,并且内置角色无法进行编辑或删除。您只能查看内置角色的权限或将其分配给用户。 -| **内置角色** | **描述信息** | +| **名称** | **描述** | | ------------------ | ------------------------------------------------------------ | -| `workspace-viewer` | 企业空间的观察者,可以查看企业空间下所有的资源。 | -| `workspace-self-provisioner` | 
企业空间普通成员,可以在企业空间下创建项目和 DevOps 工程。 | -| `workspace-regular` | 企业空间普通成员,无法在企业空间下创建项目和 DevOps 工程。 | -| `workspace-admin` | 企业空间管理员,可对任何资源进行任意操作。它可以充分管理企业空间下所有的资源。 | +| `workspace-viewer` | 企业空间观察员,可以查看企业空间中的所有资源。 | +| `workspace-self-provisioner` | 企业空间普通成员,可以查看企业设置、管理应用模板、创建项目和 DevOps 项目。 | +| `workspace-regular` | 企业空间普通成员,可以查看企业空间设置。 | +| `workspace-admin` | 企业空间管理员,可以管理企业空间中的所有资源。 | 若要查看角色所含权限: -1. 以 `ws-admin` 身份登录控制台。在**企业角色**中,点击一个角色(例如,`workspace-admin`)以查看角色详情。 - - ![role-details](/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-details.png) +1. 以 `ws-admin` 身份登录控制台。在**企业空间角色**中,点击一个角色(例如,`workspace-admin`)以查看角色详情。 2. 点击**授权用户**选项卡,查看所有被授予该角色的用户。 -## 创建企业角色 +## 创建企业空间角色 -1. 转到**企业空间设置**下的**企业角色**。 +1. 转到**企业空间设置**下的**企业空间角色**。 -2. 在**企业角色**中,点击**创建**并设置**角色标识符**(例如,`demo-project-admin`)。点击**编辑权限**继续。 +2. 在**企业空间角色**中,点击**创建**并设置**名称**(例如,`demo-project-admin`)。点击**编辑权限**继续。 -3. 在弹出的窗口中,权限归类在不同的**模块**下。在本示例中,点击**项目管理**,并为该角色选择**项目创建**、**项目管理**和**项目查看**。点击**确定**完成操作。 +3. 在弹出的窗口中,权限归类在不同的**功能模块**下。在本示例中,点击**项目管理**,并为该角色选择**项目创建**、**项目管理**和**项目查看**。点击**确定**完成操作。 {{< notice note >}} @@ -57,19 +49,15 @@ weight: 9400 {{}} -4. 新创建的角色将在**企业角色**中列出,点击右侧的 以编辑该角色。 - - ![role-list](/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-list.png) +4. 新创建的角色将在**企业空间角色**中列出,点击右侧的 以编辑该角色的信息、权限,或删除该角色。 ## 邀请新成员 -1. 转到**企业空间设置**下**企业成员**,点击**邀请成员**。 +1. 转到**企业空间设置**下**企业空间成员**,点击**邀请**。 2. 点击右侧的 以邀请一名成员加入企业空间,并为其分配一个角色。 -3. 将成员加入企业空间后,点击**确定**。您可以在**企业成员**列表中查看新邀请的成员。 +3. 将成员加入企业空间后,点击**确定**。您可以在**企业空间成员**列表中查看新邀请的成员。 -4. 若要编辑现有成员的角色或将其从企业空间中移除,点击右侧的 并选择对应的操作。 - - ![edit-existing-user](/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/edit-existing-user.png) +4. 
若要编辑现有成员的角色或将其从企业空间中移除,点击右侧的 并选择对应的操作。 \ No newline at end of file diff --git a/content/zh/docs/workspace-administration/upload-helm-based-application.md b/content/zh/docs/workspace-administration/upload-helm-based-application.md index e32348fcf..8fc3091b2 100644 --- a/content/zh/docs/workspace-administration/upload-helm-based-application.md +++ b/content/zh/docs/workspace-administration/upload-helm-based-application.md @@ -13,26 +13,18 @@ KubeSphere 提供应用程序的全生命周期管理。例如,企业空间管 ## 准备工作 - 您需要启用 [KubeSphere 应用商店 (OpenPitrix)](../../pluggable-components/app-store/)。 -- 您需要创建一个企业空间和一个用户帐户 (`project-admin`)。该帐户必须被邀请至企业空间中,并被授予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +- 您需要创建一个企业空间和一个用户 (`project-admin`)。该用户必须被邀请至企业空间中,并被授予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 ## 动手实验 -1. 用 `project-admin` 帐户登录 KubeSphere。在企业空间页面,转到**应用管理**下的**应用模板**,然后点击**上传模板**。 +1. 用 `project-admin` 帐户登录 KubeSphere。在企业空间页面,转到**应用管理**下的**应用模板**,点击**创建**。 - ![上传应用模板](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-app-template-1.png) - -2. 在弹出的对话框中,点击**上传 Helm 配置文件**。您可以上传自己的 Helm chart,或者下载 [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) 用它作为示例来完成接下来的步骤。 - - ![上传 helm](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-helm-2.png) +2. 在弹出的对话框中,点击**上传**。您可以上传自己的 Helm chart,或者下载 [Nginx chart](/files/application-templates/nginx-0.1.0.tgz) 用它作为示例来完成接下来的步骤。 3. 文件包上传完毕后,点击**确定**继续。 - ![确认上传](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/confirm-upload-3.png) - 4. 
您可以在**应用信息**下查看应用的基本信息。点击**上传图标**来上传应用的图标。您也可以跳过上传图标,直接点击**确定**。 - ![上传图标](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-icon-4.png) - {{< notice note >}} 应用图标支持的最大分辨率为 96 × 96 像素。 @@ -41,10 +33,6 @@ KubeSphere 提供应用程序的全生命周期管理。例如,企业空间管 5. 成功上传后,模板列表中会列出应用,状态为**开发中**,意味着该应用正在开发中。上传的应用对同一企业空间下的所有成员均可见。 - ![开发中应用](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/draft-app-5.png) - -6. 点击应用,随后打开的页面默认选中**版本**标签。点击待提交版本以展开菜单,您可以在菜单上看到**删除版本**、**测试部署**、**提交审核**的选项。 - - ![版本页面](/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/version-page-6.png) +6. 点击应用,随后打开的页面默认选中**版本**标签。点击待提交版本以展开菜单,您可以在菜单上看到**删除**、**测试**、**提交发布**的选项。 7. 有关如何将应用发布到应用商店的更多信息,请参考[应用程序生命周期管理](../../application-store/app-lifecycle-management/)。 diff --git a/content/zh/docs/workspace-administration/what-is-workspace.md b/content/zh/docs/workspace-administration/what-is-workspace.md index b7f0dc6d7..cf702f4b7 100644 --- a/content/zh/docs/workspace-administration/what-is-workspace.md +++ b/content/zh/docs/workspace-administration/what-is-workspace.md @@ -6,7 +6,7 @@ linkTitle: "企业空间概述" weight: 9100 --- -企业空间是用来管理[项目](../../project-administration/)、[DevOps 工程](../../devops-user-guide/)、[应用模板](../upload-helm-based-application/)和应用仓库的一种逻辑单元。您可以在企业空间中控制资源访问权限,也可以安全地在团队内部分享资源。 +企业空间是用来管理[项目](../../project-administration/)、[DevOps 项目](../../devops-user-guide/)、[应用模板](../upload-helm-based-application/)和应用仓库的一种逻辑单元。您可以在企业空间中控制资源访问权限,也可以安全地在团队内部分享资源。 最佳的做法是为租户(集群管理员除外)创建新的企业空间。同一名租户可以在多个企业空间中工作,并且多个租户可以通过不同方式访问同一个企业空间。 @@ -14,13 +14,11 @@ weight: 9100 ## 准备工作 -准备一个被授予 `workspaces-manager` 角色的帐户,例如[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)中创建的 `ws-manager` 帐户。 +准备一个被授予 `workspaces-manager` 角色的用户,例如[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)中创建的 `ws-manager` 帐户。 ## 创建企业空间 -1. 
以 `ws-manager` 身份登录 KubeSphere Web 控制台。在**企业空间**页面,您可以查看平台上的所有企业空间。点击**创建**。 - - ![workspace-list-1](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-list-1.png) +1. 以 `ws-manager` 身份登录 KubeSphere Web 控制台。点击左上角的**平台管理**并选择**访问控制**。在**企业空间**页面,点击**创建**。 {{< notice note >}} @@ -28,23 +26,19 @@ weight: 9100 {{}} -2. 在**基本信息**页面,为创建的企业空间输入名称,并从下拉菜单中选择一名企业空间管理员。点击**创建**以继续。 +2. 对于单节点集群,您需要在**基本信息**页面,为创建的企业空间输入名称,并从下拉菜单中选择一名企业空间管理员。点击**创建**。 - ![set-workspace-info](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/set-workspace-info.png) - - - **企业空间名称**:为企业空间设置一个专属名称。 + - **名称**:为企业空间设置一个专属名称。 - **别名**:该企业空间的另一种名称。 - - **企业空间管理员**:管理该企业空间的帐户。 - - **描述信息**:企业空间的简短介绍。 + - **管理员**:管理该企业空间的用户。 + - **描述**:企业空间的简短介绍。 -3. 新创建的企业空间将在下图所示的列表中列出。 + 对于多节点集群,设置企业空间的基本信息后,点击**下一步**。在**集群设置**页面,选择企业空间需要使用的集群,然后点击**创建**。 - ![workspace-created](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-created.png) +3. 企业空间创建后将显示在企业空间列表中。 4. 
点击该企业空间,您可以在**概览**页面查看企业空间中的资源状态。 - ![workspace-overview-4](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-overview-4.png) - ## 删除企业空间 在 KubeSphere 中,可以通过企业空间对项目进行分组管理,企业空间下项目的生命周期会受到企业空间的影响。具体来说,企业空间删除之后,企业空间下的项目及关联的资源也同时会被销毁。 @@ -61,7 +55,7 @@ kubectl label ns kubesphere.io/workspace- && kubectl patch ns }} -以上命令会移除与企业空间关联的 Label 并移除 ownerReferences。之后,您可以将解绑的项目重新[分配给新的企业空间](../../faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace/)。 +以上命令会移除与企业空间关联的标签并移除 ownerReferences。之后,您可以将解绑的项目重新[分配给新的企业空间](../../faq/access-control/add-kubernetes-namespace-to-kubesphere-workspace/)。 {{}} @@ -77,15 +71,13 @@ kubectl label ns kubesphere.io/workspace- && kubectl patch ns }} - 在该页面,您可以点击**编辑信息**更改企业空间的基本信息(企业空间名称无法更改),也可以打开或关闭[网络隔离](../../workspace-administration/workspace-network-isolation/)。 + 在该页面,您可以点击**编辑信息**更改企业空间的基本信息(企业空间名称无法更改),也可以开启或关闭[网络隔离](../../workspace-administration/workspace-network-isolation/)。 {{}} -2. 若要删除企业空间,先勾选**确定删除企业空间**,然后点击**删除**。 +2. 
若要删除企业空间,点击**删除企业空间**下的**删除**。在出现的对话框中输入企业空间的名称,然后点击**确定**。 {{< notice warning >}} diff --git a/content/zh/docs/workspace-administration/workspace-network-isolation.md b/content/zh/docs/workspace-administration/workspace-network-isolation.md index 71ac2e83b..81d0e02cb 100644 --- a/content/zh/docs/workspace-administration/workspace-network-isolation.md +++ b/content/zh/docs/workspace-administration/workspace-network-isolation.md @@ -10,7 +10,7 @@ weight: 9500 - 已经启用[网络策略](../../pluggable-components/network-policy/)。 -- 需要使用拥有 `workspace-admin` 角色的帐户。例如,使用在[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)教程中创建的 `ws-admin` 帐户。 +- 需要使用拥有 `workspace-admin` 角色的用户。例如,使用在[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)教程中创建的 `ws-admin` 用户。 {{< notice note >}} @@ -22,8 +22,6 @@ weight: 9500 企业空间网络隔离默认关闭。您可以在**企业空间设置**下的**基本信息**页面开启网络隔离。 -![workspace-isolation-page](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-network-isolation/workspace-isolation-page.png) - {{< notice note >}} 当网络隔离开启时,默认允许出站流量,而不同企业空间的进站流量将被拒绝。如果您需要自定义网络策略,则需要开启[项目网络隔离](../../project-administration/project-network-isolation/)并在**项目设置**中添加网络策略。 @@ -34,6 +32,6 @@ weight: 9500 ## 最佳做法 -要确保企业空间中的所有 Pod 都安全,一个最佳做法是开启企业空间网络隔离。 +要确保企业空间中的所有容器组都安全,一个最佳做法是开启企业空间网络隔离。 当网络隔离开启时,其他企业空间无法访问该企业空间。如果企业空间的默认网络隔离无法满足您的需求,请开启项目网络隔离并自定义您的项目网络策略。 diff --git a/content/zh/docs/workspace-administration/workspace-quotas.md b/content/zh/docs/workspace-administration/workspace-quotas.md index 8e0443b0c..c46fed310 100644 --- a/content/zh/docs/workspace-administration/workspace-quotas.md +++ b/content/zh/docs/workspace-administration/workspace-quotas.md @@ -1,12 +1,12 @@ --- title: "企业空间配额" keywords: 'KubeSphere, Kubernetes, 企业空间, 配额' -description: '设置企业空间配额以管理企业空间中所有项目和 DevOps 工程的总资源用量。' +description: '设置企业空间配额以管理企业空间中所有项目和 DevOps 项目的总资源用量。' linkTitle: "企业空间配额" weight: 9700 --- -企业空间配额用于管理企业空间中所有项目和 DevOps 工程的总资源用量。企业空间配额与[项目配额](../project-quotas/)相似,也包含 CPU 和内存的预留 
(Request) 和限制 (Limit)。预留确保企业空间中的项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反,限制则确保企业空间中的所有项目的资源用量不能超过特定数值。 +企业空间配额用于管理企业空间中所有项目和 DevOps 项目的总资源用量。企业空间配额与[项目配额](../project-quotas/)相似,也包含 CPU 和内存的预留(Request)和限制(Limit)。预留确保企业空间中的项目能够获得其所需的资源,因为这些资源已经得到明确保障和预留。相反,限制则确保企业空间中的所有项目的资源用量不能超过特定数值。 在[多集群架构](../../multicluster-management/)中,由于您需要[将一个或多个集群分配到企业空间中](../../cluster-administration/cluster-settings/cluster-visibility-and-authorization/),您可以设置该企业空间在不同集群上的资源用量。 @@ -14,19 +14,17 @@ weight: 9700 ## 准备工作 -您需要准备一个可用的企业空间和一个账户 (`ws-manager`)。该账户必须在平台层级具有 `workspaces-manager` 角色。有关更多信息,请参阅[创建企业空间、项目、帐户和角色](../../quick-start/create-workspace-and-project/)。 +您需要准备一个可用的企业空间和一个用户 (`ws-manager`)。该用户必须在平台层级具有 `workspaces-manager` 角色。有关更多信息,请参阅[创建企业空间、项目、用户和角色](../../quick-start/create-workspace-and-project/)。 ## 设置企业空间配额 1. 使用 `ws-manager` 用户登录 KubeSphere Web 控制台,进入企业空间。 -2. 在**企业空间设置**下,选择**配额管理**。 +2. 在**企业空间设置**下,选择**企业空间配额**。 -3. **配额管理**页面列有分配到该企业空间的全部可用集群,以及各集群的 **CPU 限额**、**CPU 需求**、**内存限额**和**内存需求**。 +3. **企业空间配额**页面列有分配到该企业空间的全部可用集群,以及各集群的 CPU 限额、CPU 需求、内存限额和内存需求。 -4. 在列表右侧点击**编辑配额**即可查看企业空间配额信息。默认情况下,KubeSphere 不为企业空间设置任何资源预留或资源限制。如需设置资源预留或资源限制来管理 CPU 和内存资源,您可以移动 至期望数值或直接输入期望数值。将字段设为空值表示不对资源进行预留或限制。 - - ![edit-workspace-quotas](/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/edit-workspace-quotas.png) +4. 在列表右侧点击**编辑配额**即可查看企业空间配额信息。默认情况下,KubeSphere 不为企业空间设置任何资源预留或资源限制。如需设置资源预留或资源限制来管理 CPU 和内存资源,您可以移动 至期望数值或直接输入期望数值。将字段设为空值表示不对资源进行预留或限制。 {{< notice note >}} diff --git a/content/zh/learn/_index.md b/content/zh/learn/_index.md index 8a435ed4e..efa517676 100644 --- a/content/zh/learn/_index.md +++ b/content/zh/learn/_index.md @@ -1,10 +1,10 @@ --- -title: KubeSphere 云原生技术课堂 +title: 云原生实战 css: "scss/learn.scss" section1: - title: KubeSphere 云原生技术课堂 - content: KubeSphere x CNCF
    最完善的知识体系,从零到一带您入门以 Kubernete 为核心的云原生技术生态 + title: 云原生实战 + content: KubeSphere x 尚硅谷
    最完善的知识体系,从零到一带您入门以 Kubernetes 为核心的云原生技术生态 topImage: "/images/learn/banner.png" section2: @@ -28,20 +28,44 @@ section3: section4: title: 讲师阵容 - description: 本课程由 KubeSphere 开源社区联合 CNCF 倾力制作,课程和资料一切开源免费,特邀 KubeSphere 团队核心研发成员与 DevOps 教练参与录制。 + description: 本课程由 KubeSphere 团队核心研发人员、DevOps 教练和尚硅谷金牌讲师雷神倾心打造,非常适合初学者,制定了完整的学习路线,贯穿了前沿的技术体系,以实战操作方式,对云原生技术栈进行了全面的讲解。 list: - - name: 王老师 - profession: KubeSphere DevOps 研发 - description: 本课程由 KubeSphere 开源社区联合 CNCF 倾力制作,特邀 KubeSphere 团队核心研发成员与 DevOps 教练参与录制。 - image: - - name: 王老师 - profession: KubeSphere DevOps 研发 - description: 本课程由 KubeSphere 开源社区联合 CNCF 倾力制作,特邀 KubeSphere 团队核心研发成员与 DevOps 教练参与录制。 - image: - - name: 王老师 - profession: KubeSphere DevOps 研发 - description: 本课程由 KubeSphere 开源社区联合 CNCF 倾力制作,特邀 KubeSphere 团队核心研发成员与 DevOps 教练参与录制。 - image: + - name: 雷丰阳 + profession: 尚硅谷名师 + description: 号称雷神,业内名师,粉丝遍布全国,Java 技术视频播放量大户,先后从事于电商、金融等各个行业,拥有丰富的项目实战经验,擅长将互联网实战开发经验无缝带入课程。 + image: /images/learn/leify.jpeg + - name: 雷万钧 + profession: KubeSphere 研发 + description: 顾问研发工程师,KubeSphere 可观测团队成员,负责日志、通知和审计功能的开发。OpenFunction 社区维护者,FlunetBit Operator 社区维护者。 + image: /images/learn/leiwj.png + - name: 朱晗 + profession: KubeSphere 可观测性&边缘计算开发工程师 + description: 三年爬虫和数据分析,技术栈 vuejs/python/golang, 擅长爬虫和后端开发。OpenFunction 社区维护者,FlunetBit Operator 社区维护者。 + image: /images/learn/zhuhan.png + - name: 李辉 + profession: KubeSphere 应用商店研发工程师 + description: 从事 k8s,容器相关技术工作三年。 + image: /images/learn/lihui.png + - name: 张亮 + profession: KubeSphere 研发 + description: 主要负责 DevOps 相关的开发工作。热衷开源。 + image: /images/learn/zhangliang.png + - name: 向军涛 + profession: KubeSphere 研发 + description: KubeSphere 平台研发工程师,主要负责监控、告警、事件等可观测性方面的相关开发工作。 + image: /images/learn/juntao.png + - name: 马岩 + profession: KubeSphere 研发 + description: 主要负责认证鉴权相关开发工作,关注云原生安全领域。曾负责过多个企业软件研发及云原生迁移等工作。 + image: /images/learn/mayan.png + - name: 石志国 + profession: QingCloud 高级讲师 + description: 在 IT 领域拥有 16 年的工作经验,其中云、虚拟化、自动化领域拥有超过 10 年工作经验。现任 QingCloud 高级讲师,曾负责 CA Technologies 
中国及亚太区云产品及解决方案、虚拟化、自动化及云平台监控方向的培训工作。曾在复旦大学(硕士)、南昌大学(硕士)和厦门大学讲授“云与虚拟化”学分课程。 + image: /images/learn/shizg.png + - name: 郭峰 + profession: KubeSphere Installer 研发 + description: KubeKey Maintainer,openEuler 社区 Cloud Native SIG Maintainer。 + image: /images/learn/guofeng.png section5: title: 受益人群 list: @@ -81,5 +105,4 @@ section7: base: 成都/武汉 workYears: 3〜5年 link: ---- - +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/_index.md b/content/zh/learn/level_1/_index.md index 3c18eefeb..cd99a2c93 100644 --- a/content/zh/learn/level_1/_index.md +++ b/content/zh/learn/level_1/_index.md @@ -1,5 +1,6 @@ --- -linkTitle: Level 1:容器与 Kubernetes 技术基础 +linkTitle: 第一章:容器化基础 +weight: 1 _build: render: false diff --git a/content/zh/learn/level_1/lesson_1/_index.md b/content/zh/learn/level_1/lesson_1/_index.md index 37260b1e3..89d6ecf32 100644 --- a/content/zh/learn/level_1/lesson_1/_index.md +++ b/content/zh/learn/level_1/lesson_1/_index.md @@ -1,10 +1,10 @@ --- -linkTitle: 第一堂“云原生”课 +linkTitle: 课程简介 weight: 1 _build: render: false -profit: 了解容器技术的概念、本质、发展趋势等 -time: 2020-10-13 20:00-20:40 +profit: 从整体了解课程大纲 +time: 2021-12-17 20:00-20:40 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_1/content.md b/content/zh/learn/level_1/lesson_1/content.md index 1d1ca093b..054e8fc17 100644 --- a/content/zh/learn/level_1/lesson_1/content.md +++ b/content/zh/learn/level_1/lesson_1/content.md @@ -1,9 +1,9 @@ --- -title: Kubesphere | 第一堂“云原生”课 +title: 课程简介 keywords: Kubesphere, Kubesphere learn -description: Kubesphere +description: 从整体了解课程大纲 -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/Lab_Docker%20%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85%E4%B8%8E%E4%BD%BF%E7%94%A8_20201019.pdf +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd --- diff --git a/content/zh/learn/level_1/lesson_1/courseware.md b/content/zh/learn/level_1/lesson_1/courseware.md deleted file mode 100644 
index 0c7746296..000000000 --- a/content/zh/learn/level_1/lesson_1/courseware.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Kubesphere | 第一堂“云原生”课 -keywords: Kubesphere, Kubesphere learn -description: Kubesphere - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B_20201016.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_1/examination.md b/content/zh/learn/level_1/lesson_1/examination.md deleted file mode 100644 index 291bf7ff9..000000000 --- a/content/zh/learn/level_1/lesson_1/examination.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Kubesphere | 第一堂“云原生”课 -keywords: Kubesphere, Kubesphere learn -description: Kubesphere - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E8%80%83%E9%A2%98-%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_1/video.md b/content/zh/learn/level_1/lesson_1/video.md index a117362dd..1420b2d0b 100644 --- a/content/zh/learn/level_1/lesson_1/video.md +++ b/content/zh/learn/level_1/lesson_1/video.md @@ -1,10 +1,9 @@ --- -title: Kubesphere | 第一堂“云原生”课 +title: 课程简介 keywords: Kubesphere, Kubesphere learn -description: Kubesphere +description: 从整体了解课程大纲 video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B.mp4 + videoUrl: 
https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/01%E3%80%81%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98-%E8%AF%BE%E7%A8%8B%E7%AE%80%E4%BB%8B.mp4 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_10/_index.md b/content/zh/learn/level_1/lesson_10/_index.md new file mode 100644 index 000000000..c5c38f30b --- /dev/null +++ b/content/zh/learn/level_1/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Docker 安装 +weight: 10 + +_build: + render: false + +profit: 了解如何在 Linux 中安装 Docker +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_10/content.md b/content/zh/learn/level_1/lesson_10/content.md new file mode 100644 index 000000000..7dc3fbc78 --- /dev/null +++ b/content/zh/learn/level_1/lesson_10/content.md @@ -0,0 +1,9 @@ +--- +title: Docker 安装 +keywords: Kubesphere, Kubesphere learn +description: 了解如何在 Linux 中安装 Docker + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/mbvigg#3k0Q4 + +--- + diff --git a/content/zh/learn/level_1/lesson_10/video.md b/content/zh/learn/level_1/lesson_10/video.md new file mode 100644 index 000000000..f3db1802f --- /dev/null +++ b/content/zh/learn/level_1/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: Docker 安装 +keywords: Kubesphere, Kubesphere learn +description: 了解如何在 Linux 中安装 Docker + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/10%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-Docker%E5%AE%89%E8%A3%85.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_11/_index.md b/content/zh/learn/level_1/lesson_11/_index.md new file mode 100644 index 000000000..fddd6838f --- /dev/null +++ b/content/zh/learn/level_1/lesson_11/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 镜像操作 +weight: 11 + +_build: + render: false + +profit: 通过 Docker CLI 来管理镜像 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git 
a/content/zh/learn/level_1/lesson_11/content.md b/content/zh/learn/level_1/lesson_11/content.md new file mode 100644 index 000000000..777d69e93 --- /dev/null +++ b/content/zh/learn/level_1/lesson_11/content.md @@ -0,0 +1,9 @@ +--- +title: 镜像操作 +keywords: Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来管理镜像 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#lyZyO + +--- + diff --git a/content/zh/learn/level_1/lesson_11/video.md b/content/zh/learn/level_1/lesson_11/video.md new file mode 100644 index 000000000..4cf664b22 --- /dev/null +++ b/content/zh/learn/level_1/lesson_11/video.md @@ -0,0 +1,8 @@ +--- +title: 镜像操作 +keywords: Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来管理镜像 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/11%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E9%95%9C%E5%83%8F%E6%93%8D%E4%BD%9C.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_12/_index.md b/content/zh/learn/level_1/lesson_12/_index.md new file mode 100644 index 000000000..fb0c33cdb --- /dev/null +++ b/content/zh/learn/level_1/lesson_12/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 容器操作 +weight: 12 + +_build: + render: false + +profit: 通过 Docker CLI 来启动容器 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_12/content.md b/content/zh/learn/level_1/lesson_12/content.md new file mode 100644 index 000000000..db90b7365 --- /dev/null +++ b/content/zh/learn/level_1/lesson_12/content.md @@ -0,0 +1,9 @@ +--- +title: 容器操作 +keywords: Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来启动容器 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#xBX8Z + +--- + diff --git a/content/zh/learn/level_1/lesson_12/video.md b/content/zh/learn/level_1/lesson_12/video.md new file mode 100644 index 000000000..803bfc8fd --- /dev/null +++ b/content/zh/learn/level_1/lesson_12/video.md @@ -0,0 +1,8 @@ +--- +title: 容器操作 +keywords: 
Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来启动容器 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/12%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E5%AE%B9%E5%99%A8%E6%93%8D%E4%BD%9C.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_13/_index.md b/content/zh/learn/level_1/lesson_13/_index.md new file mode 100644 index 000000000..329cd57e3 --- /dev/null +++ b/content/zh/learn/level_1/lesson_13/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 修改容器内容 +weight: 13 + +_build: + render: false + +profit: 通过 Docker CLI 来修改容器内容 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_13/content.md b/content/zh/learn/level_1/lesson_13/content.md new file mode 100644 index 000000000..0d91ff59b --- /dev/null +++ b/content/zh/learn/level_1/lesson_13/content.md @@ -0,0 +1,9 @@ +--- +title: 修改容器内容 +keywords: Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来修改容器内容 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Rngn4 + +--- + diff --git a/content/zh/learn/level_1/lesson_13/video.md b/content/zh/learn/level_1/lesson_13/video.md new file mode 100644 index 000000000..267c750d6 --- /dev/null +++ b/content/zh/learn/level_1/lesson_13/video.md @@ -0,0 +1,8 @@ +--- +title: 修改容器内容 +keywords: Kubesphere, Kubesphere learn +description: 通过 Docker CLI 来修改容器内容 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/13%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E8%BF%9B%E5%85%A5%E5%AE%B9%E5%99%A8%E4%BF%AE%E6%94%B9%E5%86%85%E5%AE%B9.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_14/_index.md b/content/zh/learn/level_1/lesson_14/_index.md index baecbd937..63a8e6dfe 100644 --- a/content/zh/learn/level_1/lesson_14/_index.md +++ b/content/zh/learn/level_1/lesson_14/_index.md @@ -1,10 +1,10 @@ --- -linkTitle: KubeSphere 上发布单容器应用 +linkTitle: 提交改变 weight: 14 _build: 
render: false -profit: 了解 KubeSphere 发布容器应用的基本操作 -time: 2020-10-13 20:00-20:40 +profit: 将容器保存为镜像 +time: 2021-12-18 20:00-20:40 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_14/content.md b/content/zh/learn/level_1/lesson_14/content.md index 278200d6e..ec9a2fb32 100644 --- a/content/zh/learn/level_1/lesson_14/content.md +++ b/content/zh/learn/level_1/lesson_14/content.md @@ -1,7 +1,9 @@ --- -title: KubeSphere | KubeSphere 上发布单容器应用 +title: 提交改变 +keywords: Kubesphere, Kubesphere learn +description: 将容器保存为镜像 -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-14/KSCE-200-J001-14-Deployment-one-Container-Service-on-Kubesphere-lab.pdf +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#xNDfm --- diff --git a/content/zh/learn/level_1/lesson_14/courseware.md b/content/zh/learn/level_1/lesson_14/courseware.md deleted file mode 100644 index ff7fa415e..000000000 --- a/content/zh/learn/level_1/lesson_14/courseware.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: KubeSphere | KubeSphere 上发布单容器应用 - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-14/KSCE-200-J001-14-Deployment-one-Container-Service-on-Kubesphere-ppt.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_14/video.md b/content/zh/learn/level_1/lesson_14/video.md index a109a4d00..e6342528d 100644 --- a/content/zh/learn/level_1/lesson_14/video.md +++ b/content/zh/learn/level_1/lesson_14/video.md @@ -1,7 +1,8 @@ --- -title: KubeSphere | KubeSphere 上发布单容器应用 +title: 提交改变 +keywords: Kubesphere, Kubesphere learn +description: 将容器保存为镜像 video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-14/KSCE-200-J001-14-Deployment-one-Container-Service-on-Kubesphere.mp4 ---- \ No newline at end of file + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/15%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E9%95%9C%E5%83%8F%E4%BF%9D%E5%AD%98.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_15/_index.md b/content/zh/learn/level_1/lesson_15/_index.md new file mode 100644 index 000000000..dec8fa774 --- /dev/null +++ b/content/zh/learn/level_1/lesson_15/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 镜像保存 +weight: 15 + +_build: + render: false + +profit: 将镜像保存为压缩包 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_15/content.md b/content/zh/learn/level_1/lesson_15/content.md new file mode 100644 index 000000000..1a8515c4d --- /dev/null +++ b/content/zh/learn/level_1/lesson_15/content.md @@ -0,0 +1,9 @@ +--- +title: 镜像保存 +keywords: Kubesphere, Kubesphere learn +description: 将镜像保存为压缩包 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#D8iHm + +--- + diff --git a/content/zh/learn/level_1/lesson_15/video.md b/content/zh/learn/level_1/lesson_15/video.md new file mode 100644 index 000000000..26bac456b --- /dev/null +++ b/content/zh/learn/level_1/lesson_15/video.md @@ -0,0 +1,8 @@ +--- +title: 镜像保存 +keywords: Kubesphere, Kubesphere learn +description: 将镜像保存为压缩包 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/15%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E9%95%9C%E5%83%8F%E4%BF%9D%E5%AD%98.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_16/_index.md b/content/zh/learn/level_1/lesson_16/_index.md new file mode 100644 index 000000000..6cecc4804 --- /dev/null +++ b/content/zh/learn/level_1/lesson_16/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 镜像推送 +weight: 16 + +_build: + render: false + +profit: 将镜像推送到镜像仓库 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_16/content.md b/content/zh/learn/level_1/lesson_16/content.md new 
file mode 100644 index 000000000..890aafb59 --- /dev/null +++ b/content/zh/learn/level_1/lesson_16/content.md @@ -0,0 +1,9 @@ +--- +title: 镜像推送 +keywords: Kubesphere, Kubesphere learn +description: 将镜像推送到镜像仓库 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#sI9ai + +--- + diff --git a/content/zh/learn/level_1/lesson_16/video.md b/content/zh/learn/level_1/lesson_16/video.md new file mode 100644 index 000000000..d2903036f --- /dev/null +++ b/content/zh/learn/level_1/lesson_16/video.md @@ -0,0 +1,8 @@ +--- +title: 镜像推送 +keywords: Kubesphere, Kubesphere learn +description: 将镜像推送到镜像仓库 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/16%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E9%95%9C%E5%83%8F%E6%8E%A8%E9%80%81.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_17/_index.md b/content/zh/learn/level_1/lesson_17/_index.md new file mode 100644 index 000000000..843b69de6 --- /dev/null +++ b/content/zh/learn/level_1/lesson_17/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 挂载主机目录 +weight: 17 + +_build: + render: false + +profit: 将持久化数据挂载到主机目录 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_17/content.md b/content/zh/learn/level_1/lesson_17/content.md new file mode 100644 index 000000000..6d5b4343d --- /dev/null +++ b/content/zh/learn/level_1/lesson_17/content.md @@ -0,0 +1,9 @@ +--- +title: 挂载主机目录 +keywords: Kubesphere, Kubesphere learn +description: 将持久化数据挂载到主机目录 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#kWdAh + +--- + diff --git a/content/zh/learn/level_1/lesson_17/video.md b/content/zh/learn/level_1/lesson_17/video.md new file mode 100644 index 000000000..f4734c46f --- /dev/null +++ b/content/zh/learn/level_1/lesson_17/video.md @@ -0,0 +1,8 @@ +--- +title: 挂载主机目录 +keywords: Kubesphere, Kubesphere learn +description: 将持久化数据挂载到主机目录 + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/17%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E6%8C%82%E8%BD%BD.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_18/_index.md b/content/zh/learn/level_1/lesson_18/_index.md new file mode 100644 index 000000000..5c38aacd5 --- /dev/null +++ b/content/zh/learn/level_1/lesson_18/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 补充内容 +weight: 18 + +_build: + render: false + +profit: 常用命令补充 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_18/content.md b/content/zh/learn/level_1/lesson_18/content.md new file mode 100644 index 000000000..eae1c45b4 --- /dev/null +++ b/content/zh/learn/level_1/lesson_18/content.md @@ -0,0 +1,9 @@ +--- +title: 补充内容 +keywords: Kubesphere, Kubesphere learn +description: 常用命令补充 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#kWdAh + +--- + diff --git a/content/zh/learn/level_1/lesson_18/video.md b/content/zh/learn/level_1/lesson_18/video.md new file mode 100644 index 000000000..c4b816bd7 --- /dev/null +++ b/content/zh/learn/level_1/lesson_18/video.md @@ -0,0 +1,8 @@ +--- +title: 补充内容 +keywords: Kubesphere, Kubesphere learn +description: 常用命令补充 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/18%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%91%BD%E4%BB%A4-%E8%A1%A5%E5%85%85.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_19/_index.md b/content/zh/learn/level_1/lesson_19/_index.md new file mode 100644 index 000000000..009291887 --- /dev/null +++ b/content/zh/learn/level_1/lesson_19/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-创建 Java 应用 +weight: 19 + +_build: + render: false + +profit: 使用 Docker 创建并启动 Java 容器 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_19/content.md b/content/zh/learn/level_1/lesson_19/content.md new file mode 100644 index 
000000000..05d488b4c --- /dev/null +++ b/content/zh/learn/level_1/lesson_19/content.md @@ -0,0 +1,9 @@ +--- +title: 进阶-创建 Java 应用 +keywords: Kubesphere, Kubesphere learn +description: 使用 Docker 创建并启动 Java 容器 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx + +--- + diff --git a/content/zh/learn/level_1/lesson_19/video.md b/content/zh/learn/level_1/lesson_19/video.md new file mode 100644 index 000000000..c315c36f4 --- /dev/null +++ b/content/zh/learn/level_1/lesson_19/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-创建 Java 应用 +keywords: Kubesphere, Kubesphere learn +description: 使用 Docker 创建并启动 Java 容器 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/19%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-%E5%88%9B%E5%BB%BAJava%E5%BA%94%E7%94%A8.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_2/_index.md b/content/zh/learn/level_1/lesson_2/_index.md index a2985cd7e..bbd9354a3 100644 --- a/content/zh/learn/level_1/lesson_2/_index.md +++ b/content/zh/learn/level_1/lesson_2/_index.md @@ -1,10 +1,10 @@ --- -linkTitle: 容器环境准备-Docker +linkTitle: 云计算简单概念 weight: 2 _build: render: false -profit: 了解容器技术的概念、本质、发展趋势等 -time: 2020-10-13 20:00-20:40 +profit: 了解云计算的相关概念 +time: 2021-12-17 20:00-20:40 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_2/content.md b/content/zh/learn/level_1/lesson_2/content.md index 831cf699b..26bd51848 100644 --- a/content/zh/learn/level_1/lesson_2/content.md +++ b/content/zh/learn/level_1/lesson_2/content.md @@ -1,7 +1,9 @@ --- -title: Kubesphere | 第二堂“云原生”课 +title: 云计算简单概念 +keywords: Kubesphere, Kubesphere learn +description: 了解云计算的相关概念 -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/Lab_Docker%20%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85%E4%B8%8E%E4%BD%BF%E7%94%A8_20201019.pdf +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd --- diff --git 
a/content/zh/learn/level_1/lesson_2/courseware.md b/content/zh/learn/level_1/lesson_2/courseware.md deleted file mode 100644 index e6916b405..000000000 --- a/content/zh/learn/level_1/lesson_2/courseware.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Kubesphere | 第二堂“云原生”课 - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B_20201016.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_2/video.md b/content/zh/learn/level_1/lesson_2/video.md index ddcfd7857..f829f94b4 100644 --- a/content/zh/learn/level_1/lesson_2/video.md +++ b/content/zh/learn/level_1/lesson_2/video.md @@ -1,7 +1,8 @@ --- -title: Kubesphere | 第二堂“云原生”课 +title: 云计算简单概念 +keywords: Kubesphere, Kubesphere learn +description: 了解云计算的相关概念 video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-docs.pek3b.qingstor.com/website/meetup/meetup-final-1226.mp4 + videoUrl: https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/02%E3%80%81%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98-%E4%BA%91%E8%AE%A1%E7%AE%97%E7%AE%80%E5%8D%95%E6%A6%82%E5%BF%B5.mp4 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_20/_index.md b/content/zh/learn/level_1/lesson_20/_index.md new file mode 100644 index 000000000..1ef0c2a13 --- /dev/null +++ b/content/zh/learn/level_1/lesson_20/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-部署 Redis +weight: 20 + +_build: + render: false + +profit: 使用 Docker 创建并启动 Redis 容器 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_20/content.md b/content/zh/learn/level_1/lesson_20/content.md new file mode 100644 index 000000000..f2cbf0bf9 --- /dev/null +++ 
b/content/zh/learn/level_1/lesson_20/content.md @@ -0,0 +1,9 @@ +--- +title: 进阶-部署 Redis +keywords: Kubesphere, Kubesphere learn +description: 使用 Docker 创建并启动 Redis 容器 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx + +--- + diff --git a/content/zh/learn/level_1/lesson_20/video.md b/content/zh/learn/level_1/lesson_20/video.md new file mode 100644 index 000000000..6d689b565 --- /dev/null +++ b/content/zh/learn/level_1/lesson_20/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-部署 Redis +keywords: Kubesphere, Kubesphere learn +description: 使用 Docker 创建并启动 Redis 容器 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/20%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-%E9%83%A8%E7%BD%B2redis.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_21/_index.md b/content/zh/learn/level_1/lesson_21/_index.md new file mode 100644 index 000000000..2e716caf0 --- /dev/null +++ b/content/zh/learn/level_1/lesson_21/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-Redis 设置密码访问 +weight: 21 + +_build: + render: false + +profit: 为 Redis 容器设置安全密码 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_21/content.md b/content/zh/learn/level_1/lesson_21/content.md new file mode 100644 index 000000000..762c9eba6 --- /dev/null +++ b/content/zh/learn/level_1/lesson_21/content.md @@ -0,0 +1,8 @@ +--- +title: 进阶-Redis 设置密码访问 +keywords: Kubesphere, Kubesphere learn +description: 为 Redis 容器设置安全密码 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#NSsog +--- + diff --git a/content/zh/learn/level_1/lesson_21/video.md b/content/zh/learn/level_1/lesson_21/video.md new file mode 100644 index 000000000..308e49981 --- /dev/null +++ b/content/zh/learn/level_1/lesson_21/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-Redis 设置密码访问 +keywords: Kubesphere, Kubesphere learn +description: 为 Redis 容器设置安全密码 + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/21%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-redis%E8%AE%BE%E7%BD%AE%E5%AF%86%E7%A0%81%E8%AE%BF%E9%97%AE.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_22/_index.md b/content/zh/learn/level_1/lesson_22/_index.md new file mode 100644 index 000000000..b6f9600e5 --- /dev/null +++ b/content/zh/learn/level_1/lesson_22/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-统计访问人数场景 +weight: 22 + +_build: + render: false + +profit: 结合 Redis 和 Spring Boot 统计访问人数场景 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_22/content.md b/content/zh/learn/level_1/lesson_22/content.md new file mode 100644 index 000000000..524eee822 --- /dev/null +++ b/content/zh/learn/level_1/lesson_22/content.md @@ -0,0 +1,8 @@ +--- +title: 进阶-统计访问人数场景 +keywords: Kubesphere, Kubesphere learn +description: 结合 Redis 和 Spring Boot 统计访问人数场景 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx +--- + diff --git a/content/zh/learn/level_1/lesson_22/video.md b/content/zh/learn/level_1/lesson_22/video.md new file mode 100644 index 000000000..d0ee97cbf --- /dev/null +++ b/content/zh/learn/level_1/lesson_22/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-统计访问人数场景 +keywords: Kubesphere, Kubesphere learn +description: 结合 Redis 和 Spring Boot 统计访问人数场景 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/22%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-%E7%BB%9F%E8%AE%A1%E8%AE%BF%E9%97%AE%E4%BA%BA%E6%95%B0%E5%9C%BA%E6%99%AF.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_23/_index.md b/content/zh/learn/level_1/lesson_23/_index.md new file mode 100644 index 000000000..611e7d8d3 --- /dev/null +++ b/content/zh/learn/level_1/lesson_23/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-Dockerfile 构建任意应用镜像 +weight: 23 + +_build: + render: false + +profit: 编写 Dockerfile 将自己的应用打包成镜像 
+time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_23/content.md b/content/zh/learn/level_1/lesson_23/content.md new file mode 100644 index 000000000..6fc21cd08 --- /dev/null +++ b/content/zh/learn/level_1/lesson_23/content.md @@ -0,0 +1,8 @@ +--- +title: 进阶-Dockerfile 构建任意应用镜像 +keywords: Kubesphere, Kubesphere learn +description: 编写 Dockerfile 将自己的应用打包成镜像 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx +--- + diff --git a/content/zh/learn/level_1/lesson_23/video.md b/content/zh/learn/level_1/lesson_23/video.md new file mode 100644 index 000000000..ac5efe346 --- /dev/null +++ b/content/zh/learn/level_1/lesson_23/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-Dockerfile 构建任意应用镜像 +keywords: Kubesphere, Kubesphere learn +description: 编写 Dockerfile 将自己的应用打包成镜像 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/23%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-Dockerfile%E6%9E%84%E5%BB%BA%E4%BB%BB%E6%84%8F%E5%BA%94%E7%94%A8%E9%95%9C%E5%83%8F.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_24/_index.md b/content/zh/learn/level_1/lesson_24/_index.md new file mode 100644 index 000000000..a2d0df14c --- /dev/null +++ b/content/zh/learn/level_1/lesson_24/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 进阶-应用分享与启动 +weight: 24 + +_build: + render: false + +profit: 将自定义镜像推送到公共镜像仓库 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_24/content.md b/content/zh/learn/level_1/lesson_24/content.md new file mode 100644 index 000000000..4b2e017b4 --- /dev/null +++ b/content/zh/learn/level_1/lesson_24/content.md @@ -0,0 +1,8 @@ +--- +title: 进阶-应用分享与启动 +keywords: Kubesphere, Kubesphere learn +description: 将自定义镜像推送到公共镜像仓库 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx +--- + diff --git a/content/zh/learn/level_1/lesson_24/video.md 
b/content/zh/learn/level_1/lesson_24/video.md new file mode 100644 index 000000000..759e38cd7 --- /dev/null +++ b/content/zh/learn/level_1/lesson_24/video.md @@ -0,0 +1,8 @@ +--- +title: 进阶-应用分享与启动 +keywords: Kubesphere, Kubesphere learn +description: 将自定义镜像推送到公共镜像仓库 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/24%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E8%BF%9B%E9%98%B6-%E5%BA%94%E7%94%A8%E5%88%86%E4%BA%AB%E4%B8%8E%E5%90%AF%E5%8A%A8.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_25/_index.md b/content/zh/learn/level_1/lesson_25/_index.md new file mode 100644 index 000000000..8bc2c2e54 --- /dev/null +++ b/content/zh/learn/level_1/lesson_25/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Docker入门实战完成 +weight: 25 + +_build: + render: false + +profit: 本章总结 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_25/content.md b/content/zh/learn/level_1/lesson_25/content.md new file mode 100644 index 000000000..5043ef814 --- /dev/null +++ b/content/zh/learn/level_1/lesson_25/content.md @@ -0,0 +1,8 @@ +--- +title: Docker入门实战完成 +keywords: Kubesphere, Kubesphere learn +description: 本章总结 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/ox16bw#Tulhx +--- + diff --git a/content/zh/learn/level_1/lesson_25/video.md b/content/zh/learn/level_1/lesson_25/video.md new file mode 100644 index 000000000..4164187ea --- /dev/null +++ b/content/zh/learn/level_1/lesson_25/video.md @@ -0,0 +1,8 @@ +--- +title: Docker入门实战完成 +keywords: Kubesphere, Kubesphere learn +description: 本章总结 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/25%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-Docker%E5%85%A5%E9%97%A8%E5%AE%9E%E6%88%98%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_3/_index.md b/content/zh/learn/level_1/lesson_3/_index.md new file mode 100644 index 000000000..0c9dc4364 --- /dev/null 
+++ b/content/zh/learn/level_1/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 阿里云服务器开通流程 +weight: 3 + +_build: + render: false + +profit: 注册并使用阿里云服务器 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_3/content.md b/content/zh/learn/level_1/lesson_3/content.md new file mode 100644 index 000000000..c65fc97af --- /dev/null +++ b/content/zh/learn/level_1/lesson_3/content.md @@ -0,0 +1,8 @@ +--- +title: 阿里云服务器开通流程 +keywords: Kubesphere, Kubesphere learn +description: 注册并使用阿里云服务器 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_3/video.md b/content/zh/learn/level_1/lesson_3/video.md new file mode 100644 index 000000000..76532af95 --- /dev/null +++ b/content/zh/learn/level_1/lesson_3/video.md @@ -0,0 +1,9 @@ +--- +title: 阿里云服务器开通流程 +keywords: Kubesphere, Kubesphere learn +description: 注册并使用阿里云服务器 + +video: + videoUrl: https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/03%E3%80%81%E4%BA%91%E5%B9%B3%E5%8F%B0-%E9%98%BF%E9%87%8C%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E5%BC%80%E9%80%9A%E6%B5%81%E7%A8%8B.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_4/_index.md b/content/zh/learn/level_1/lesson_4/_index.md new file mode 100644 index 000000000..5dff46589 --- /dev/null +++ b/content/zh/learn/level_1/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 测试安装 Nginx +weight: 4 + +_build: + render: false + +profit: 在云平台上部署 Nginx +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_4/content.md b/content/zh/learn/level_1/lesson_4/content.md new file mode 100644 index 000000000..68b86365c --- /dev/null +++ b/content/zh/learn/level_1/lesson_4/content.md @@ -0,0 +1,8 @@ +--- +title: 测试安装 Nginx +keywords: Kubesphere, Kubesphere learn +description: 在云平台上部署 Nginx + +pdfUrl: 
https://www.yuque.com/leifengyang/oncloud/vfvmcd + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_4/video.md b/content/zh/learn/level_1/lesson_4/video.md new file mode 100644 index 000000000..8e6084657 --- /dev/null +++ b/content/zh/learn/level_1/lesson_4/video.md @@ -0,0 +1,9 @@ +--- +title: 测试安装 Nginx +keywords: Kubesphere, Kubesphere learn +description: 在云平台上部署 Nginx + +video: + videoUrl: https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/04%E3%80%81%E4%BA%91%E5%B9%B3%E5%8F%B0-%E6%B5%8B%E8%AF%95%E5%AE%89%E8%A3%85nginx%E5%B9%B6%E8%AE%BF%E9%97%AE.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_5/_index.md b/content/zh/learn/level_1/lesson_5/_index.md new file mode 100644 index 000000000..0dcc30c88 --- /dev/null +++ b/content/zh/learn/level_1/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 服务器的安全组设置 +weight: 5 + +_build: + render: false + +profit: 通过安全组来设置服务器的防火墙 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_5/content.md b/content/zh/learn/level_1/lesson_5/content.md new file mode 100644 index 000000000..520e2a659 --- /dev/null +++ b/content/zh/learn/level_1/lesson_5/content.md @@ -0,0 +1,8 @@ +--- +title: 服务器的安全组设置 +keywords: Kubesphere, Kubesphere learn +description: 通过安全组来设置服务器的防火墙 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_5/video.md b/content/zh/learn/level_1/lesson_5/video.md new file mode 100644 index 000000000..4b68a2198 --- /dev/null +++ b/content/zh/learn/level_1/lesson_5/video.md @@ -0,0 +1,9 @@ +--- +title: 服务器的安全组设置 +keywords: Kubesphere, Kubesphere learn +description: 通过安全组来设置服务器的防火墙 + +video: + videoUrl: 
https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/05%E3%80%81%E4%BA%91%E5%B9%B3%E5%8F%B0-%E6%9C%8D%E5%8A%A1%E5%99%A8%E7%9A%84%E5%AE%89%E5%85%A8%E7%BB%84%E8%AE%BE%E7%BD%AE.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_6/_index.md b/content/zh/learn/level_1/lesson_6/_index.md index 46dca475f..634d79102 100644 --- a/content/zh/learn/level_1/lesson_6/_index.md +++ b/content/zh/learn/level_1/lesson_6/_index.md @@ -1,10 +1,10 @@ --- -linkTitle: 通过KubeSphere S2I构建容器镜像 -weight: 1 +linkTitle: 按量付费优点 +weight: 6 _build: render: false -profit: 了解源码,编译,然后打包成镜像的整个过程 -time: 2020-10-13 20:00-20:40 +profit: 通过按量付费节省成本 +time: 2021-12-18 20:00-20:40 --- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_6/content.md b/content/zh/learn/level_1/lesson_6/content.md index a81c661df..abb9aa5f7 100644 --- a/content/zh/learn/level_1/lesson_6/content.md +++ b/content/zh/learn/level_1/lesson_6/content.md @@ -1,9 +1,9 @@ --- -title: 通过KubeSphere S2I构建容器镜像 +title: 按量付费优点 keywords: Kubesphere, Kubesphere learn -description: Kubesphere +description: 通过按量付费节省成本 -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image_lab.pdf +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd --- diff --git a/content/zh/learn/level_1/lesson_6/courseware.md b/content/zh/learn/level_1/lesson_6/courseware.md deleted file mode 100644 index b9fbc099f..000000000 --- a/content/zh/learn/level_1/lesson_6/courseware.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: 通过KubeSphere S2I构建容器镜像 -keywords: Kubesphere, Kubesphere learn -description: Kubesphere - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_6/examination.md b/content/zh/learn/level_1/lesson_6/examination.md deleted file mode 100644 index 
50d759a1c..000000000 --- a/content/zh/learn/level_1/lesson_6/examination.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: 通过KubeSphere S2I构建容器镜像 -keywords: Kubesphere, Kubesphere learn -description: Kubesphere - -pdfUrl: ---- - diff --git a/content/zh/learn/level_1/lesson_6/video.md b/content/zh/learn/level_1/lesson_6/video.md index 4f67f7b58..6d0618cbd 100644 --- a/content/zh/learn/level_1/lesson_6/video.md +++ b/content/zh/learn/level_1/lesson_6/video.md @@ -1,9 +1,8 @@ --- -title: 通过KubeSphere S2I构建容器镜像 +title: 按量付费优点 keywords: Kubesphere, Kubesphere learn -description: Kubesphere +description: 通过按量付费节省成本 video: - - videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image.mp4 + videoUrl: https://kubesphere-community.pek3b.qingstor.com/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/06%E3%80%81%E4%BA%91%E5%B9%B3%E5%8F%B0-%E6%8C%89%E9%87%8F%E4%BB%98%E8%B4%B9%E4%BC%98%E7%82%B9.mp4 --- diff --git a/content/zh/learn/level_1/lesson_7/_index.md b/content/zh/learn/level_1/lesson_7/_index.md new file mode 100644 index 000000000..2003dff73 --- /dev/null +++ b/content/zh/learn/level_1/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 私有网络 VPC 实战 +weight: 7 + +_build: + render: false + +profit: 通过实践来理解 VPC +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_7/content.md b/content/zh/learn/level_1/lesson_7/content.md new file mode 100644 index 000000000..52c8402b7 --- /dev/null +++ b/content/zh/learn/level_1/lesson_7/content.md @@ -0,0 +1,9 @@ +--- +title: 私有网络 VPC 实战 +keywords: Kubesphere, Kubesphere learn +description: 通过实践来理解 VPC + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vfvmcd + +--- + diff --git a/content/zh/learn/level_1/lesson_7/video.md b/content/zh/learn/level_1/lesson_7/video.md new file mode 100644 index 000000000..ecedbc3bb --- /dev/null +++ b/content/zh/learn/level_1/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: 私有网络 VPC 实战 +keywords: 
Kubesphere, Kubesphere learn +description: 通过实践来理解 VPC + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/07%E3%80%81%E4%BA%91%E5%B9%B3%E5%8F%B0-%E7%A7%81%E6%9C%89%E7%BD%91%E7%BB%9CVPC%E5%AE%9E%E6%88%98.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_8/_index.md b/content/zh/learn/level_1/lesson_8/_index.md new file mode 100644 index 000000000..3fdb59e4c --- /dev/null +++ b/content/zh/learn/level_1/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Docker 基础概念 +weight: 8 + +_build: + render: false + +profit: 了解容器的优点和架构 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_1/lesson_8/content.md b/content/zh/learn/level_1/lesson_8/content.md new file mode 100644 index 000000000..f896a52ca --- /dev/null +++ b/content/zh/learn/level_1/lesson_8/content.md @@ -0,0 +1,9 @@ +--- +title: Docker 基础概念 +keywords: Kubesphere, Kubesphere learn +description: 了解容器的优点和架构 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/mbvigg + +--- + diff --git a/content/zh/learn/level_1/lesson_8/video.md b/content/zh/learn/level_1/lesson_8/video.md new file mode 100644 index 000000000..cd0990707 --- /dev/null +++ b/content/zh/learn/level_1/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: Docker 基础概念 +keywords: Kubesphere, Kubesphere learn +description: 了解容器的优点和架构 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/08%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-Docker%E6%A6%82%E5%BF%B5.mp4 +--- diff --git a/content/zh/learn/level_1/lesson_9/_index.md b/content/zh/learn/level_1/lesson_9/_index.md new file mode 100644 index 000000000..e0453fbd2 --- /dev/null +++ b/content/zh/learn/level_1/lesson_9/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 开通青云服务器 +weight: 9 + +_build: + render: false + +profit: 熟悉公有云开通服务器的流程 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git 
a/content/zh/learn/level_1/lesson_9/content.md b/content/zh/learn/level_1/lesson_9/content.md new file mode 100644 index 000000000..2686784de --- /dev/null +++ b/content/zh/learn/level_1/lesson_9/content.md @@ -0,0 +1,9 @@ +--- +title: 开通青云服务器 +keywords: Kubesphere, Kubesphere learn +description: 熟悉公有云开通服务器的流程 + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/mbvigg#2ASxH + +--- + diff --git a/content/zh/learn/level_1/lesson_9/video.md b/content/zh/learn/level_1/lesson_9/video.md new file mode 100644 index 000000000..e692a3192 --- /dev/null +++ b/content/zh/learn/level_1/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: 开通青云服务器 +keywords: Kubesphere, Kubesphere learn +description: 熟悉公有云开通服务器的流程 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/09%E3%80%81%E5%AE%B9%E5%99%A8%E5%8C%96-%E5%BC%80%E9%80%9A%E9%9D%92%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8.mp4 +--- diff --git a/content/zh/learn/level_10/_index.md b/content/zh/learn/level_10/_index.md new file mode 100644 index 000000000..96765534d --- /dev/null +++ b/content/zh/learn/level_10/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第十章:KubeVirt 虚拟机负载管理 +weight: 10 + +_build: + render: false +--- diff --git a/content/zh/learn/level_10/lesson_1/_index.md b/content/zh/learn/level_10/lesson_1/_index.md new file mode 100644 index 000000000..ecd1d0035 --- /dev/null +++ b/content/zh/learn/level_10/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 虚拟化技术介绍 +weight: 1 + +_build: + render: false + +profit: 了解虚拟化的相关概念 +time: 2021-12-19 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_1/content.md b/content/zh/learn/level_10/lesson_1/content.md new file mode 100644 index 000000000..4b009d26b --- /dev/null +++ b/content/zh/learn/level_10/lesson_1/content.md @@ -0,0 +1,7 @@ +--- +title: 虚拟化技术介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解虚拟化的相关概念 + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-%E8%99%9A%E6%8B%9F%E5%8C%96%E6%8A%80%E6%9C%AF%E4%BB%8B%E7%BB%8D.pdf +--- diff --git a/content/zh/learn/level_10/lesson_1/video.md b/content/zh/learn/level_10/lesson_1/video.md new file mode 100644 index 000000000..a1102681b --- /dev/null +++ b/content/zh/learn/level_10/lesson_1/video.md @@ -0,0 +1,8 @@ +--- +title: 虚拟化技术介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解虚拟化的相关概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/60%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-%E8%99%9A%E6%8B%9F%E5%8C%96%E6%8A%80%E6%9C%AF%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_10/_index.md b/content/zh/learn/level_10/lesson_10/_index.md new file mode 100644 index 000000000..5eddbc599 --- /dev/null +++ b/content/zh/learn/level_10/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 虚拟化云平台功能演示 +weight: 10 + +_build: + render: false + +profit: 熟悉 KubeSphere 虚拟化云平台的主要功能 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_10/video.md b/content/zh/learn/level_10/lesson_10/video.md new file mode 100644 index 000000000..5348ae426 --- /dev/null +++ b/content/zh/learn/level_10/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 虚拟化云平台功能演示 +keywords: Kubesphere, Kubesphere learn +description: 熟悉 KubeSphere 虚拟化云平台的主要功能 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/69%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeSphere%20%E8%99%9A%E6%8B%9F%E5%8C%96%E4%BA%91%E5%B9%B3%E5%8F%B0%E5%8A%9F%E8%83%BD%E6%BC%94%E7%A4%BA.mp4 +--- diff --git 
a/content/zh/learn/level_10/lesson_2/_index.md b/content/zh/learn/level_10/lesson_2/_index.md new file mode 100644 index 000000000..f48440aad --- /dev/null +++ b/content/zh/learn/level_10/lesson_2/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeVirt 介绍 +weight: 2 + +_build: + render: false + +profit: 了解 KubeVirt 的相关概念 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_2/content.md b/content/zh/learn/level_10/lesson_2/content.md new file mode 100644 index 000000000..98c8c2333 --- /dev/null +++ b/content/zh/learn/level_10/lesson_2/content.md @@ -0,0 +1,7 @@ +--- +title: KubeVirt 介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 的相关概念 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-Kubevirt%E4%BB%8B%E7%BB%8D.pdf +--- diff --git a/content/zh/learn/level_10/lesson_2/video.md b/content/zh/learn/level_10/lesson_2/video.md new file mode 100644 index 000000000..2b5752528 --- /dev/null +++ b/content/zh/learn/level_10/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: KubeVirt 介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 的相关概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/61%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%20%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_3/_index.md b/content/zh/learn/level_10/lesson_3/_index.md new file mode 100644 index 000000000..105a88a53 --- /dev/null +++ b/content/zh/learn/level_10/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeVirt 虚拟机 +weight: 3 + +_build: + render: false + +profit: 了解 KubeVirt 虚拟机的概念 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_3/content.md b/content/zh/learn/level_10/lesson_3/content.md new file 
mode 100644 index 000000000..ea8d88a4f --- /dev/null +++ b/content/zh/learn/level_10/lesson_3/content.md @@ -0,0 +1,7 @@ +--- +title: KubeVirt 虚拟机 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 虚拟机的概念 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%E6%A0%B8%E5%BF%83%E6%A6%82%E5%BF%B5%E8%A7%A3%E6%9E%90.pdf +--- diff --git a/content/zh/learn/level_10/lesson_3/video.md b/content/zh/learn/level_10/lesson_3/video.md new file mode 100644 index 000000000..ebf6eee38 --- /dev/null +++ b/content/zh/learn/level_10/lesson_3/video.md @@ -0,0 +1,8 @@ +--- +title: KubeVirt 虚拟机 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 虚拟机的概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/62%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_4/_index.md b/content/zh/learn/level_10/lesson_4/_index.md new file mode 100644 index 000000000..1aeb80541 --- /dev/null +++ b/content/zh/learn/level_10/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeVirt 虚拟机镜像 +weight: 4 + +_build: + render: false + +profit: 了解 KubeVirt 镜像的概念 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_4/content.md b/content/zh/learn/level_10/lesson_4/content.md new file mode 100644 index 000000000..7efafdc73 --- /dev/null +++ b/content/zh/learn/level_10/lesson_4/content.md @@ -0,0 +1,7 @@ +--- +title: KubeVirt 虚拟机镜像 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 镜像的概念 + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%E6%A0%B8%E5%BF%83%E6%A6%82%E5%BF%B5%E8%A7%A3%E6%9E%90.pdf +--- diff --git a/content/zh/learn/level_10/lesson_4/video.md b/content/zh/learn/level_10/lesson_4/video.md new file mode 100644 index 000000000..6362a6344 --- /dev/null +++ b/content/zh/learn/level_10/lesson_4/video.md @@ -0,0 +1,8 @@ +--- +title: KubeVirt 虚拟机镜像 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 镜像的概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/63%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E9%95%9C%E5%83%8F.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_5/_index.md b/content/zh/learn/level_10/lesson_5/_index.md new file mode 100644 index 000000000..9b0206324 --- /dev/null +++ b/content/zh/learn/level_10/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeVirt 磁盘和卷 +weight: 5 + +_build: + render: false + +profit: 了解 KubeVirt 磁盘和卷的概念 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_5/content.md b/content/zh/learn/level_10/lesson_5/content.md new file mode 100644 index 000000000..4019090cd --- /dev/null +++ b/content/zh/learn/level_10/lesson_5/content.md @@ -0,0 +1,7 @@ +--- +title: KubeVirt 磁盘和卷 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 磁盘和卷的概念 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%E6%A0%B8%E5%BF%83%E6%A6%82%E5%BF%B5%E8%A7%A3%E6%9E%90.pdf +--- diff --git a/content/zh/learn/level_10/lesson_5/video.md b/content/zh/learn/level_10/lesson_5/video.md new file mode 100644 index 000000000..efad0cf43 --- 
/dev/null +++ b/content/zh/learn/level_10/lesson_5/video.md @@ -0,0 +1,8 @@ +--- +title: KubeVirt 磁盘和卷 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 磁盘和卷的概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/64%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%20%E7%A3%81%E7%9B%98%E5%92%8C%E5%8D%B7.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_6/_index.md b/content/zh/learn/level_10/lesson_6/_index.md new file mode 100644 index 000000000..e6ee6d3e2 --- /dev/null +++ b/content/zh/learn/level_10/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeVirt 网络 +weight: 6 + +_build: + render: false + +profit: 了解 KubeVirt 网络的概念 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_6/content.md b/content/zh/learn/level_10/lesson_6/content.md new file mode 100644 index 000000000..e860e373a --- /dev/null +++ b/content/zh/learn/level_10/lesson_6/content.md @@ -0,0 +1,7 @@ +--- +title: KubeVirt 网络 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 网络的概念 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%E6%A0%B8%E5%BF%83%E6%A6%82%E5%BF%B5%E8%A7%A3%E6%9E%90.pdf +--- diff --git a/content/zh/learn/level_10/lesson_6/video.md b/content/zh/learn/level_10/lesson_6/video.md new file mode 100644 index 000000000..05c17d8ef --- /dev/null +++ b/content/zh/learn/level_10/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: KubeVirt 网络 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeVirt 网络的概念 + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/65%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeVirt%20%E7%BD%91%E7%BB%9C.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_7/_index.md b/content/zh/learn/level_10/lesson_7/_index.md new file mode 100644 index 000000000..bbca840c0 --- /dev/null +++ b/content/zh/learn/level_10/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 使用 KubeVirt 管理虚拟机负载 +weight: 7 + +_build: + render: false + +profit: 了解如何使用 KubeVirt 来管理虚拟机 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_7/content.md b/content/zh/learn/level_10/lesson_7/content.md new file mode 100644 index 000000000..e4f61db02 --- /dev/null +++ b/content/zh/learn/level_10/lesson_7/content.md @@ -0,0 +1,7 @@ +--- +title: 使用 KubeVirt 管理虚拟机负载 +keywords: Kubesphere, Kubesphere learn +description: 了解如何使用 KubeVirt 来管理虚拟机 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-%E4%BD%BF%E7%94%A8KubeVirt%E7%AE%A1%E7%90%86%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD.pdf +--- diff --git a/content/zh/learn/level_10/lesson_7/video.md b/content/zh/learn/level_10/lesson_7/video.md new file mode 100644 index 000000000..18e4c8910 --- /dev/null +++ b/content/zh/learn/level_10/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: 使用 KubeVirt 管理虚拟机负载 +keywords: Kubesphere, Kubesphere learn +description: 了解如何使用 KubeVirt 来管理虚拟机 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/66%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-%E4%BD%BF%E7%94%A8%20KubeVirt%20%E7%AE%A1%E7%90%86%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD.mp4 +--- diff --git 
a/content/zh/learn/level_10/lesson_8/_index.md b/content/zh/learn/level_10/lesson_8/_index.md new file mode 100644 index 000000000..cd28a867e --- /dev/null +++ b/content/zh/learn/level_10/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 虚拟化云平台及功能介绍 +weight: 8 + +_build: + render: false + +profit: KubeSphere 虚拟化管理平台介绍及功能介绍 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_8/content.md b/content/zh/learn/level_10/lesson_8/content.md new file mode 100644 index 000000000..ee55bd430 --- /dev/null +++ b/content/zh/learn/level_10/lesson_8/content.md @@ -0,0 +1,7 @@ +--- +title: KubeSphere 虚拟化云平台及功能介绍 +keywords: Kubesphere, Kubesphere learn +description: KubeSphere 虚拟化管理平台介绍及功能介绍 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeSphere%20%E8%99%9A%E6%8B%9F%E5%8C%96%E7%AE%A1%E7%90%86%E5%B9%B3%E5%8F%B0%E4%BB%8B%E7%BB%8D%E5%8F%8A%E5%8A%9F%E8%83%BD%E4%BB%8B%E7%BB%8D.pdf +--- diff --git a/content/zh/learn/level_10/lesson_8/video.md b/content/zh/learn/level_10/lesson_8/video.md new file mode 100644 index 000000000..c98e34640 --- /dev/null +++ b/content/zh/learn/level_10/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 虚拟化云平台及功能介绍 +keywords: Kubesphere, Kubesphere learn +description: KubeSphere 虚拟化管理平台介绍及功能介绍 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/67%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeSphere%20%E8%99%9A%E6%8B%9F%E5%8C%96%E4%BA%91%E5%B9%B3%E5%8F%B0%E5%8F%8A%E5%8A%9F%E8%83%BD%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_10/lesson_9/_index.md b/content/zh/learn/level_10/lesson_9/_index.md new file mode 100644 index 000000000..00f28b124 --- /dev/null +++ b/content/zh/learn/level_10/lesson_9/_index.md @@ -0,0 +1,10 
@@ +--- +linkTitle: KubeSphere 虚拟化云平台安装 +weight: 9 + +_build: + render: false + +profit: KubeSphere 虚拟化云平台单节点安装 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_10/lesson_9/video.md b/content/zh/learn/level_10/lesson_9/video.md new file mode 100644 index 000000000..88085d8a5 --- /dev/null +++ b/content/zh/learn/level_10/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 虚拟化云平台安装 +keywords: Kubesphere, Kubesphere learn +description: KubeSphere 虚拟化云平台单节点安装 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/68%E3%80%81KubeVirt%20%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%B4%9F%E8%BD%BD%E7%AE%A1%E7%90%86-KubeSphere%20%E8%99%9A%E6%8B%9F%E5%8C%96%E4%BA%91%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85.mp4 +--- diff --git a/content/zh/learn/level_2/Container_foundations/lesson_1/_index.md b/content/zh/learn/level_2/Container_foundations/lesson_1/_index.md deleted file mode 100644 index b60f2e9aa..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_1/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -linkTitle: 容器技术发展简介 -weight: 3 - -_build: - render: false - -profit: 了解容器的概念及其发展简史。 -time: - ---- diff --git a/content/zh/learn/level_2/Container_foundations/lesson_1/content.md b/content/zh/learn/level_2/Container_foundations/lesson_1/content.md deleted file mode 100644 index 3a8f52f9b..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_1/content.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Kubesphere | 容器技术发展简介 - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-1/1_container_history_brief.pdf - ---- - diff --git a/content/zh/learn/level_2/Container_foundations/lesson_1/video.md b/content/zh/learn/level_2/Container_foundations/lesson_1/video.md deleted file mode 100644 index 6fc65ddb8..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_1/video.md +++ /dev/null @@ -1,8 +0,0 @@ 
---- -title: Kubesphere | 容器技术发展简介 - -video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-1/1_container_history_brief.mp4 - ---- diff --git a/content/zh/learn/level_2/Container_foundations/lesson_2/_index.md b/content/zh/learn/level_2/Container_foundations/lesson_2/_index.md deleted file mode 100644 index 7bdbe4614..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_2/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -linkTitle: 容器环境准备 - docker -weight: 3 - -_build: - render: false - -profit: 了解 Docker 运行环境的创建及基本使用。 -time: - ---- diff --git a/content/zh/learn/level_2/Container_foundations/lesson_2/content.md b/content/zh/learn/level_2/Container_foundations/lesson_2/content.md deleted file mode 100644 index cf988b730..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_2/content.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: KubeSphere | 容器环境准备 - docker - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-2/2_container_environment_provision.pdf - ---- - diff --git a/content/zh/learn/level_2/Container_foundations/lesson_2/video.md b/content/zh/learn/level_2/Container_foundations/lesson_2/video.md deleted file mode 100644 index 9c077cd39..000000000 --- a/content/zh/learn/level_2/Container_foundations/lesson_2/video.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: KubeSphere | 容器环境准备 - docker - -video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-2/2_container_environment_provision.mp4 - ---- diff --git a/content/zh/learn/level_2/_index.md b/content/zh/learn/level_2/_index.md index b75a7003c..53c44a3ec 100644 --- a/content/zh/learn/level_2/_index.md +++ b/content/zh/learn/level_2/_index.md @@ -1,6 +1,7 @@ --- -linkTitle: Level 2:动手实践之扩大建造与运维 
+linkTitle: 第二章:Kubernetes 基础 +weight: 2 _build: render: false ---- \ No newline at end of file +--- diff --git a/content/zh/learn/level_2/lesson_27/_index.md b/content/zh/learn/level_2/lesson_27/_index.md deleted file mode 100644 index 378e0ae9d..000000000 --- a/content/zh/learn/level_2/lesson_27/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -linkTitle: 应用调度 -weight: 4 - -_build: - render: false - -profit: 了解 KubeSphere 上应用调度方法 -time: 2020-10-13 20:00-20:40 ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_27/content.md b/content/zh/learn/level_2/lesson_27/content.md deleted file mode 100644 index 179d66dd6..000000000 --- a/content/zh/learn/level_2/lesson_27/content.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: KubeSphere | 应用调度 - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications-lab.pdf - ---- - diff --git a/content/zh/learn/level_2/lesson_27/courseware.md b/content/zh/learn/level_2/lesson_27/courseware.md deleted file mode 100644 index 1fbc3b218..000000000 --- a/content/zh/learn/level_2/lesson_27/courseware.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: KubeSphere | 应用调度 - -pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications-ppt.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_27/video.md b/content/zh/learn/level_2/lesson_27/video.md deleted file mode 100644 index aa737ebed..000000000 --- a/content/zh/learn/level_2/lesson_27/video.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: KubeSphere | 应用调度 - -video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications.mp4 - ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_3/_index.md b/content/zh/learn/level_2/lesson_3/_index.md deleted file mode 100644 index 
9091bdfb1..000000000 --- a/content/zh/learn/level_2/lesson_3/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -linkTitle: 容器技术发展简介 -weight: 3 - -_build: - render: false - -profit: 了解容器技术的概念、本质、发展趋势等 本质、发展趋势等 发展趋势等 本质、发展趋势等 -time: 2020-10-13 20:00-20:40 ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_3/content.md b/content/zh/learn/level_2/lesson_3/content.md deleted file mode 100644 index c2dde02b1..000000000 --- a/content/zh/learn/level_2/lesson_3/content.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Kubesphere | 第三堂“云原生”课 - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/Lab_Docker%20%E8%BF%90%E8%A1%8C%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85%E4%B8%8E%E4%BD%BF%E7%94%A8_20201019.pdf - ---- - diff --git a/content/zh/learn/level_2/lesson_3/examination.md b/content/zh/learn/level_2/lesson_3/examination.md deleted file mode 100644 index 2ea5601c2..000000000 --- a/content/zh/learn/level_2/lesson_3/examination.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Kubesphere | 第三堂“云原生”课 - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E8%80%83%E9%A2%98-%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_3/video.md b/content/zh/learn/level_2/lesson_3/video.md deleted file mode 100644 index 9523ecb66..000000000 --- a/content/zh/learn/level_2/lesson_3/video.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Kubesphere | 第三堂“云原生”课 - -video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-docs.pek3b.qingstor.com/website/meetup/meetup-final-1226.mp4 ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_4/_index.md b/content/zh/learn/level_2/lesson_4/_index.md deleted file mode 
100644 index 477060bce..000000000 --- a/content/zh/learn/level_2/lesson_4/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -linkTitle: 第四堂“云原生”课 -weight: 4 - -_build: - render: false - -profit: 了解容器技术的概念、本质、发展趋势等 本质、发展趋势等 发展趋势等 本质、发展趋势等 -time: 2020-10-13 20:00-20:40 ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_4/courseware.md b/content/zh/learn/level_2/lesson_4/courseware.md deleted file mode 100644 index 3e146bd49..000000000 --- a/content/zh/learn/level_2/lesson_4/courseware.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Kubesphere | 第四堂“云原生”课 - -pdfUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B_20201016.pdf ---- \ No newline at end of file diff --git a/content/zh/learn/level_2/lesson_4/video.md b/content/zh/learn/level_2/lesson_4/video.md deleted file mode 100644 index 0d0af6da6..000000000 --- a/content/zh/learn/level_2/lesson_4/video.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Kubesphere | 第四堂“云原生”课 - -video: - snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png - videoUrl: https://kubesphere-docs.pek3b.qingstor.com/website/%E4%BA%91%E5%8E%9F%E7%94%9F%E8%AF%BE%E7%A8%8B/lesson1/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5_1_%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF%E5%8F%91%E5%B1%95%E7%AE%80%E4%BB%8B.mp4 - ---- \ No newline at end of file diff --git a/content/zh/learn/level_3/_index.md b/content/zh/learn/level_3/_index.md new file mode 100644 index 000000000..3c7a86422 --- /dev/null +++ b/content/zh/learn/level_3/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第三章:使用 KubeSphere 简化 Kubernetes 集群部署 +weight: 3 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_70/_index.md b/content/zh/learn/level_3/lesson_70/_index.md new file mode 
100644 index 000000000..a5bbab567 --- /dev/null +++ b/content/zh/learn/level_3/lesson_70/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 升配与重置系统 +weight: 70 + +_build: + render: false + +profit: 从头部署的前置准备工作,升级服务器配置与重置系统 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_70/content.md b/content/zh/learn/level_3/lesson_70/content.md new file mode 100644 index 000000000..72f41843e --- /dev/null +++ b/content/zh/learn/level_3/lesson_70/content.md @@ -0,0 +1,9 @@ +--- +title: 升配与重置系统 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/gby0ar + +--- + diff --git a/content/zh/learn/level_3/lesson_70/video.md b/content/zh/learn/level_3/lesson_70/video.md new file mode 100644 index 000000000..7018b0de3 --- /dev/null +++ b/content/zh/learn/level_3/lesson_70/video.md @@ -0,0 +1,6 @@ +--- +title: 升配与重置系统 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/70%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%8D%87%E9%85%8D%E4%B8%8E%E9%87%8D%E7%BD%AE%E7%B3%BB%E7%BB%9F.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_71/_index.md b/content/zh/learn/level_3/lesson_71/_index.md new file mode 100644 index 000000000..77ead0c9f --- /dev/null +++ b/content/zh/learn/level_3/lesson_71/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 安装 Kubernetes 集群 +weight: 71 + +_build: + render: false + +profit: 安装 Docker、Kubernetes 集群 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_71/content.md b/content/zh/learn/level_3/lesson_71/content.md new file mode 100644 index 000000000..1cc47f121 --- /dev/null +++ b/content/zh/learn/level_3/lesson_71/content.md @@ -0,0 +1,9 @@ +--- +title: 安装 Kubernetes 集群 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://www.yuque.com/leifengyang/oncloud/gz1sls#twdlI + +--- + diff --git a/content/zh/learn/level_3/lesson_71/video.md b/content/zh/learn/level_3/lesson_71/video.md new file mode 100644 index 000000000..dfca7e235 --- /dev/null +++ b/content/zh/learn/level_3/lesson_71/video.md @@ -0,0 +1,6 @@ +--- +title: 安装 Kubernetes 集群 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/71%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%AE%89%E8%A3%85Kubernetes%E9%9B%86%E7%BE%A4.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_72/_index.md b/content/zh/learn/level_3/lesson_72/_index.md new file mode 100644 index 000000000..110fa9829 --- /dev/null +++ b/content/zh/learn/level_3/lesson_72/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 安装默认存储类型 +weight: 72 + +_build: + render: false + +profit: 安装 KubeSphere 前置环境-安装默认存储类型 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_72/content.md b/content/zh/learn/level_3/lesson_72/content.md new file mode 100644 index 000000000..6d48161e2 --- /dev/null +++ b/content/zh/learn/level_3/lesson_72/content.md @@ -0,0 +1,9 @@ +--- +title: 安装默认存储类型 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/gz1sls#twdlI + +--- + diff --git a/content/zh/learn/level_3/lesson_72/video.md b/content/zh/learn/level_3/lesson_72/video.md new file mode 100644 index 000000000..fa67aa827 --- /dev/null +++ b/content/zh/learn/level_3/lesson_72/video.md @@ -0,0 +1,6 @@ +--- +title: 安装默认存储类型 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/72%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%89%8D%E7%BD%AE%E7%8E%AF%E5%A2%83-%E5%AE%89%E8%A3%85%E9%BB%98%E8%AE%A4%E5%AD%98%E5%82%A8%E7%B1%BB%E5%9E%8B.mp4 +--- \ No newline at end of file diff --git 
a/content/zh/learn/level_3/lesson_73/_index.md b/content/zh/learn/level_3/lesson_73/_index.md new file mode 100644 index 000000000..6a4282c8a --- /dev/null +++ b/content/zh/learn/level_3/lesson_73/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 安装 metrics-server +weight: 73 + +_build: + render: false + +profit: 安装 metrics-server 集群监控组件 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_73/content.md b/content/zh/learn/level_3/lesson_73/content.md new file mode 100644 index 000000000..2d036efc8 --- /dev/null +++ b/content/zh/learn/level_3/lesson_73/content.md @@ -0,0 +1,9 @@ +--- +title: 安装 metrics-server +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/gz1sls#flJaV + +--- + diff --git a/content/zh/learn/level_3/lesson_73/video.md b/content/zh/learn/level_3/lesson_73/video.md new file mode 100644 index 000000000..7bee51359 --- /dev/null +++ b/content/zh/learn/level_3/lesson_73/video.md @@ -0,0 +1,6 @@ +--- +title: 安装 metrics-server + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/73%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%89%8D%E7%BD%AE%E7%8E%AF%E5%A2%83-%E5%AE%89%E8%A3%85metrics-server.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_74/_index.md b/content/zh/learn/level_3/lesson_74/_index.md new file mode 100644 index 000000000..e527a5f38 --- /dev/null +++ b/content/zh/learn/level_3/lesson_74/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 全功能安装完成 +weight: 74 + +_build: + render: false + +profit: 全功能安装 KubeSphere +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_74/content.md b/content/zh/learn/level_3/lesson_74/content.md new file mode 100644 index 000000000..28ba9bcb8 --- /dev/null +++ b/content/zh/learn/level_3/lesson_74/content.md @@ -0,0 +1,9 @@ +--- +title: 全功能安装完成 
+keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/gz1sls#wqcbi + +--- + diff --git a/content/zh/learn/level_3/lesson_74/video.md b/content/zh/learn/level_3/lesson_74/video.md new file mode 100644 index 000000000..88aabb4ad --- /dev/null +++ b/content/zh/learn/level_3/lesson_74/video.md @@ -0,0 +1,6 @@ +--- +title: 全功能安装完成 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/74%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%85%A8%E5%8A%9F%E8%83%BD%E5%AE%89%E8%A3%85%E5%AE%8C%E6%88%90.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_75/_index.md b/content/zh/learn/level_3/lesson_75/_index.md new file mode 100644 index 000000000..3bbbec4b8 --- /dev/null +++ b/content/zh/learn/level_3/lesson_75/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 单节点上 KubeKey 一键安装完整平台 +weight: 75 + +_build: + render: false + +profit: Linux 单节点上 KubeKey 一键安装 KubeSphere 完整平台 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_75/content.md b/content/zh/learn/level_3/lesson_75/content.md new file mode 100644 index 000000000..16c9e5897 --- /dev/null +++ b/content/zh/learn/level_3/lesson_75/content.md @@ -0,0 +1,9 @@ +--- +title: 单节点上 KubeKey 一键安装完整平台 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/slk3ro + +--- + diff --git a/content/zh/learn/level_3/lesson_75/video.md b/content/zh/learn/level_3/lesson_75/video.md new file mode 100644 index 000000000..7029ca777 --- /dev/null +++ b/content/zh/learn/level_3/lesson_75/video.md @@ -0,0 +1,6 @@ +--- +title: 单节点上 KubeKey 一键安装完整平台 + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/75%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%8D%95%E8%8A%82%E7%82%B9%E4%B8%8AKubeKey%E4%B8%80%E9%94%AE%E5%AE%89%E8%A3%85%E5%AE%8C%E6%95%B4%E5%B9%B3%E5%8F%B0.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_76/_index.md b/content/zh/learn/level_3/lesson_76/_index.md new file mode 100644 index 000000000..1a5662f5a --- /dev/null +++ b/content/zh/learn/level_3/lesson_76/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 多节点上 KubeKey 一键安装集群 +weight: 76 + +_build: + render: false + +profit: Linux 多节点上 KubeKey 一键安装 KubeSphere 集群 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_3/lesson_76/content.md b/content/zh/learn/level_3/lesson_76/content.md new file mode 100644 index 000000000..fa54bdd43 --- /dev/null +++ b/content/zh/learn/level_3/lesson_76/content.md @@ -0,0 +1,9 @@ +--- +title: 多节点上 KubeKey 一键安装集群 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/psa230 + +--- + diff --git a/content/zh/learn/level_3/lesson_76/video.md b/content/zh/learn/level_3/lesson_76/video.md new file mode 100644 index 000000000..aad5af87c --- /dev/null +++ b/content/zh/learn/level_3/lesson_76/video.md @@ -0,0 +1,6 @@ +--- +title: 多节点上 KubeKey 一键安装集群 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/76%E3%80%81KubeSphere-%E5%B9%B3%E5%8F%B0%E5%AE%89%E8%A3%85-%E5%A4%9A%E8%8A%82%E7%82%B9%E4%B8%8AKubeKey%E4%B8%80%E9%94%AE%E5%AE%89%E8%A3%85%E9%9B%86%E7%BE%A4.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_4/_index.md b/content/zh/learn/level_4/_index.md new file mode 100644 index 000000000..620f7ea1b --- /dev/null +++ b/content/zh/learn/level_4/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第四章:KubeSphere 单集群功能介绍与使用 +weight: 4 + +_build: + render: false +--- 
diff --git a/content/zh/learn/level_4/lesson_1/_index.md b/content/zh/learn/level_4/lesson_1/_index.md new file mode 100644 index 000000000..2963dc344 --- /dev/null +++ b/content/zh/learn/level_4/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 基础架构以及多租户实践 - 用户管理 +weight: 1 + +_build: + render: false + +profit: 了解 KubeSphere 的整体架构,以及用户权限和用户管理相关功能 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_1/content.md b/content/zh/learn/level_4/lesson_1/content.md new file mode 100644 index 000000000..401b8e600 --- /dev/null +++ b/content/zh/learn/level_4/lesson_1/content.md @@ -0,0 +1,7 @@ +--- +title: 基础架构以及多租户实践 - 用户管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/dx5ao9 +--- diff --git a/content/zh/learn/level_4/lesson_1/video.md b/content/zh/learn/level_4/lesson_1/video.md new file mode 100644 index 000000000..fc1d2ae74 --- /dev/null +++ b/content/zh/learn/level_4/lesson_1/video.md @@ -0,0 +1,8 @@ +--- +title: 基础架构以及多租户实践 - 用户管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/77%E3%80%81KubeSphere-%E5%A4%9A%E7%A7%9F%E6%88%B7-hr%E8%B4%A6%E6%88%B7%E4%B8%BA%E7%B3%BB%E7%BB%9F%E6%B7%BB%E5%8A%A0%E7%94%A8%E6%88%B7.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_10/_index.md b/content/zh/learn/level_4/lesson_10/_index.md new file mode 100644 index 000000000..0008153cf --- /dev/null +++ b/content/zh/learn/level_4/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 通过应用市场部署 Zookeeper 服务 +weight: 10 + +_build: + render: false + +profit: 了解 KubeSphere 下的应用商店的使用并且部署 Zookeeper 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_10/content.md b/content/zh/learn/level_4/lesson_10/content.md new file mode 100644 index 000000000..fba4fa296 --- /dev/null +++ b/content/zh/learn/level_4/lesson_10/content.md @@ -0,0 
+1,7 @@ +--- +title: 通过应用市场部署 Zookeeper 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#KIwlv +--- diff --git a/content/zh/learn/level_4/lesson_10/video.md b/content/zh/learn/level_4/lesson_10/video.md new file mode 100644 index 000000000..2eae9af42 --- /dev/null +++ b/content/zh/learn/level_4/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: 通过应用市场部署 Zookeeper 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/86%E3%80%81KubeSphere-%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93-%E4%BB%8E%E5%BA%94%E7%94%A8%E5%B8%82%E5%9C%BA%E9%83%A8%E7%BD%B2Zookeeper.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_11/_index.md b/content/zh/learn/level_4/lesson_11/_index.md new file mode 100644 index 000000000..2a2454b94 --- /dev/null +++ b/content/zh/learn/level_4/lesson_11/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 部署本地 Nacos 服务 +weight: 11 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署流程以 “若依” 为例,部署本地 Nacos 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_11/content.md b/content/zh/learn/level_4/lesson_11/content.md new file mode 100644 index 000000000..5de6f088d --- /dev/null +++ b/content/zh/learn/level_4/lesson_11/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 部署本地 Nacos 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_11/video.md b/content/zh/learn/level_4/lesson_11/video.md new file mode 100644 index 000000000..cd0dbfcc0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_11/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 部署本地 Nacos 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/87%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E6%9C%AC%E5%9C%B0%E7%8E%AF%E5%A2%83-nacos%E5%90%AF%E5%8A%A8.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_12/_index.md b/content/zh/learn/level_4/lesson_12/_index.md new file mode 100644 index 000000000..188c37993 --- /dev/null +++ b/content/zh/learn/level_4/lesson_12/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 导入本地数据库 +weight: 12 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署,导入本地数据库 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_12/content.md b/content/zh/learn/level_4/lesson_12/content.md new file mode 100644 index 000000000..5beb6917c --- /dev/null +++ b/content/zh/learn/level_4/lesson_12/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 导入本地数据库 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_12/video.md b/content/zh/learn/level_4/lesson_12/video.md new file mode 100644 index 000000000..f418313f0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_12/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 导入本地数据库 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/88%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E6%9C%AC%E5%9C%B0%E7%8E%AF%E5%A2%83-%E5%AF%BC%E5%85%A5%E6%95%B0%E6%8D%AE%E5%BA%93.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_13/_index.md b/content/zh/learn/level_4/lesson_13/_index.md new file mode 100644 index 000000000..2b4c9d80a --- /dev/null +++ b/content/zh/learn/level_4/lesson_13/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 本地启动项目 +weight: 13 + +_build: + render: false + 
+profit: 了解 KubeSphere 下的微服务部署,本地启动项目测试 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_13/content.md b/content/zh/learn/level_4/lesson_13/content.md new file mode 100644 index 000000000..8ed6035cc --- /dev/null +++ b/content/zh/learn/level_4/lesson_13/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 本地启动项目 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_13/video.md b/content/zh/learn/level_4/lesson_13/video.md new file mode 100644 index 000000000..47735cbe0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_13/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 本地启动项目 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/89%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E6%9C%AC%E5%9C%B0%E7%8E%AF%E5%A2%83-%E5%90%AF%E5%8A%A8%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_14/_index.md b/content/zh/learn/level_4/lesson_14/_index.md new file mode 100644 index 000000000..e937faee2 --- /dev/null +++ b/content/zh/learn/level_4/lesson_14/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署要素 +weight: 14 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署要素以及流程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_14/content.md b/content/zh/learn/level_4/lesson_14/content.md new file mode 100644 index 000000000..a17692e63 --- /dev/null +++ b/content/zh/learn/level_4/lesson_14/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署要素 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_14/video.md 
b/content/zh/learn/level_4/lesson_14/video.md new file mode 100644 index 000000000..74252233e --- /dev/null +++ b/content/zh/learn/level_4/lesson_14/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署要素 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/90%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E4%B8%8A%E4%BA%91%E9%83%A8%E7%BD%B2-%E5%85%B3%E6%B3%A8%E7%9A%84%E8%A6%81%E7%B4%A0.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_15/_index.md b/content/zh/learn/level_4/lesson_15/_index.md new file mode 100644 index 000000000..5e69b4d72 --- /dev/null +++ b/content/zh/learn/level_4/lesson_15/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云迁移数据库 +weight: 15 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云迁移数据库的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_15/content.md b/content/zh/learn/level_4/lesson_15/content.md new file mode 100644 index 000000000..1214395b6 --- /dev/null +++ b/content/zh/learn/level_4/lesson_15/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云迁移数据库 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_15/video.md b/content/zh/learn/level_4/lesson_15/video.md new file mode 100644 index 000000000..0edef6080 --- /dev/null +++ b/content/zh/learn/level_4/lesson_15/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云迁移数据库 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/91%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E4%B8%8A%E4%BA%91%E9%83%A8%E7%BD%B2-%E8%BF%81%E7%A7%BB%E6%95%B0%E6%8D%AE%E5%BA%93.mp4 +--- 
diff --git a/content/zh/learn/level_4/lesson_16/_index.md b/content/zh/learn/level_4/lesson_16/_index.md new file mode 100644 index 000000000..e85eef64c --- /dev/null +++ b/content/zh/learn/level_4/lesson_16/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云 Nacos 服务部署分析 +weight: 16 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云 Nacos 服务部署分析配置以及过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_16/content.md b/content/zh/learn/level_4/lesson_16/content.md new file mode 100644 index 000000000..fc7d50b85 --- /dev/null +++ b/content/zh/learn/level_4/lesson_16/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务部署分析 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_16/video.md b/content/zh/learn/level_4/lesson_16/video.md new file mode 100644 index 000000000..b858a10d6 --- /dev/null +++ b/content/zh/learn/level_4/lesson_16/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务部署分析 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/92%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E4%B8%8A%E4%BA%91%E9%83%A8%E7%BD%B2-nacos%E4%B8%8A%E4%BA%91%E5%88%86%E6%9E%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_17/_index.md b/content/zh/learn/level_4/lesson_17/_index.md new file mode 100644 index 000000000..7b06d417a --- /dev/null +++ b/content/zh/learn/level_4/lesson_17/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署 +weight: 17 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云 Nacos 服务高可用部署 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_17/content.md b/content/zh/learn/level_4/lesson_17/content.md new file mode 100644 
index 000000000..d0a779b50 --- /dev/null +++ b/content/zh/learn/level_4/lesson_17/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_17/video.md b/content/zh/learn/level_4/lesson_17/video.md new file mode 100644 index 000000000..0662cd999 --- /dev/null +++ b/content/zh/learn/level_4/lesson_17/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/93%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-ruoyi-cloud-%E4%B8%8A%E4%BA%91%E9%83%A8%E7%BD%B2-nacos%E4%B8%8A%E4%BA%91%E9%AB%98%E5%8F%AF%E7%94%A8%E6%A8%A1%E5%BC%8F%E9%83%A8%E7%BD%B2.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_18/_index.md b/content/zh/learn/level_4/lesson_18/_index.md new file mode 100644 index 000000000..b53590d1e --- /dev/null +++ b/content/zh/learn/level_4/lesson_18/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署(补充) +weight: 18 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云 Nacos 服务高可用部署补充部分,添加MySQL端口号 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_18/content.md b/content/zh/learn/level_4/lesson_18/content.md new file mode 100644 index 000000000..2147a574b --- /dev/null +++ b/content/zh/learn/level_4/lesson_18/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署(补充) +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_18/video.md b/content/zh/learn/level_4/lesson_18/video.md new file mode 100644 index 000000000..7e6751809 --- /dev/null +++ 
b/content/zh/learn/level_4/lesson_18/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云 Nacos 服务高可用部署(补充) +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/94%E3%80%81%EF%BC%88%E8%A1%A5%E5%85%85%EF%BC%89%E9%83%A8%E7%BD%B2nacos%E5%BF%98%E5%86%99mysql%E7%AB%AF%E5%8F%A3%E5%8F%B7.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_19/_index.md b/content/zh/learn/level_4/lesson_19/_index.md new file mode 100644 index 000000000..2fde6e1ec --- /dev/null +++ b/content/zh/learn/level_4/lesson_19/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云配置准备 +weight: 19 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云配置准备 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_19/content.md b/content/zh/learn/level_4/lesson_19/content.md new file mode 100644 index 000000000..a5fd1deec --- /dev/null +++ b/content/zh/learn/level_4/lesson_19/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云配置准备 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_19/video.md b/content/zh/learn/level_4/lesson_19/video.md new file mode 100644 index 000000000..ad8488a97 --- /dev/null +++ b/content/zh/learn/level_4/lesson_19/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云配置准备 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/95%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%E4%BA%91%E4%B8%8A%E7%8E%AF%E5%A2%83Dockerfile%E9%85%8D%E7%BD%AE.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_2/_index.md b/content/zh/learn/level_4/lesson_2/_index.md new file mode 100644 index 
000000000..e67699eed --- /dev/null +++ b/content/zh/learn/level_4/lesson_2/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 基础架构以及多租户实践 - 企业空间管理 +weight: 2 + +_build: + render: false + +profit: 了解 KubeSphere 的整体架构,以及企业空间管理相关功能 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_2/content.md b/content/zh/learn/level_4/lesson_2/content.md new file mode 100644 index 000000000..ae81d707d --- /dev/null +++ b/content/zh/learn/level_4/lesson_2/content.md @@ -0,0 +1,7 @@ +--- +title: 基础架构以及多租户实践 - 企业空间管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/dx5ao9 +--- diff --git a/content/zh/learn/level_4/lesson_2/video.md b/content/zh/learn/level_4/lesson_2/video.md new file mode 100644 index 000000000..6ecd1323d --- /dev/null +++ b/content/zh/learn/level_4/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: 基础架构以及多租户实践 - 企业空间管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/78%E3%80%81KubeSphere-%E5%A4%9A%E7%A7%9F%E6%88%B7-wuhan-boss%E9%82%80%E8%AF%B7%E5%85%B6%E4%BB%96%E7%94%A8%E6%88%B7%E8%BF%9B%E5%85%A5%E4%BC%81%E4%B8%9A%E7%A9%BA%E9%97%B4.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_20/_index.md b/content/zh/learn/level_4/lesson_20/_index.md new file mode 100644 index 000000000..220bc4665 --- /dev/null +++ b/content/zh/learn/level_4/lesson_20/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云流程分析 +weight: 20 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云流程分析 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_20/content.md b/content/zh/learn/level_4/lesson_20/content.md new file mode 100644 index 000000000..e16ee3836 --- /dev/null +++ b/content/zh/learn/level_4/lesson_20/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云流程分析 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere 
+ +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_20/video.md b/content/zh/learn/level_4/lesson_20/video.md new file mode 100644 index 000000000..7b7138bfe --- /dev/null +++ b/content/zh/learn/level_4/lesson_20/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云流程分析 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/96%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91%E6%B5%81%E7%A8%8B%E5%88%86%E6%9E%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_21/_index.md b/content/zh/learn/level_4/lesson_21/_index.md new file mode 100644 index 000000000..f54358dd6 --- /dev/null +++ b/content/zh/learn/level_4/lesson_21/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云镜像制作与推送 +weight: 21 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云镜像制作与推送 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_21/content.md b/content/zh/learn/level_4/lesson_21/content.md new file mode 100644 index 000000000..808aff8fb --- /dev/null +++ b/content/zh/learn/level_4/lesson_21/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云镜像制作与推送 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_21/video.md b/content/zh/learn/level_4/lesson_21/video.md new file mode 100644 index 000000000..cf4dfacbd --- /dev/null +++ b/content/zh/learn/level_4/lesson_21/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云镜像制作与推送 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/97%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%E9%95%9C%E5%83%8F%E6%8E%A8%E9%80%81.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_22/_index.md b/content/zh/learn/level_4/lesson_22/_index.md new file mode 100644 index 000000000..fdcb53112 --- /dev/null +++ b/content/zh/learn/level_4/lesson_22/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云Dockerfile修改 +weight: 22 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云Dockerfile修改 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_22/content.md b/content/zh/learn/level_4/lesson_22/content.md new file mode 100644 index 000000000..058f91ce8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_22/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云Dockerfile修改 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_22/video.md b/content/zh/learn/level_4/lesson_22/video.md new file mode 100644 index 000000000..fcf0c4991 --- /dev/null +++ b/content/zh/learn/level_4/lesson_22/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云Dockerfile修改 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/98%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%E9%87%8D%E6%96%B0%E4%BF%AE%E6%94%B9Dockerfile.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_23/_index.md b/content/zh/learn/level_4/lesson_23/_index.md new file mode 100644 index 000000000..825a9b46c --- /dev/null +++ b/content/zh/learn/level_4/lesson_23/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 
monitor 服务 +weight: 23 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 monitor 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_23/content.md b/content/zh/learn/level_4/lesson_23/content.md new file mode 100644 index 000000000..32de06fa0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_23/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 monitor 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_23/video.md b/content/zh/learn/level_4/lesson_23/video.md new file mode 100644 index 000000000..eaa3e9478 --- /dev/null +++ b/content/zh/learn/level_4/lesson_23/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 monitor 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/99%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-monitor%E4%B8%8A%E4%BA%91%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_24/_index.md b/content/zh/learn/level_4/lesson_24/_index.md new file mode 100644 index 000000000..13372055d --- /dev/null +++ b/content/zh/learn/level_4/lesson_24/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 system 服务 +weight: 24 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 system 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_24/content.md b/content/zh/learn/level_4/lesson_24/content.md new file mode 100644 index 000000000..32ee1061a --- /dev/null +++ b/content/zh/learn/level_4/lesson_24/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 system 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n 
+--- diff --git a/content/zh/learn/level_4/lesson_24/video.md b/content/zh/learn/level_4/lesson_24/video.md new file mode 100644 index 000000000..01f8fef37 --- /dev/null +++ b/content/zh/learn/level_4/lesson_24/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 system 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/100%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-system%E4%B8%8A%E4%BA%91%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_25/_index.md b/content/zh/learn/level_4/lesson_25/_index.md new file mode 100644 index 000000000..e3f73ed66 --- /dev/null +++ b/content/zh/learn/level_4/lesson_25/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 job 服务 +weight: 25 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 job 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_25/content.md b/content/zh/learn/level_4/lesson_25/content.md new file mode 100644 index 000000000..890fef905 --- /dev/null +++ b/content/zh/learn/level_4/lesson_25/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 job 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_25/video.md b/content/zh/learn/level_4/lesson_25/video.md new file mode 100644 index 000000000..276862c07 --- /dev/null +++ b/content/zh/learn/level_4/lesson_25/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 job 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/101%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%20job%E4%B8%8A%E4%BA%91%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_26/_index.md b/content/zh/learn/level_4/lesson_26/_index.md new file mode 100644 index 000000000..6f14d17d3 --- /dev/null +++ b/content/zh/learn/level_4/lesson_26/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 gateway 服务 +weight: 26 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 gateway 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_26/content.md b/content/zh/learn/level_4/lesson_26/content.md new file mode 100644 index 000000000..1b1596cf0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_26/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 gateway 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_26/video.md b/content/zh/learn/level_4/lesson_26/video.md new file mode 100644 index 000000000..b60178dc3 --- /dev/null +++ b/content/zh/learn/level_4/lesson_26/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 gateway 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/102%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%20%20gateway%20%E4%B8%8A%E4%BA%91%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_27/_index.md b/content/zh/learn/level_4/lesson_27/_index.md new file mode 100644 index 000000000..08e279012 --- /dev/null +++ b/content/zh/learn/level_4/lesson_27/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 
微服务项目实战 - 上云部署 file 服务 +weight: 27 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 file 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_27/content.md b/content/zh/learn/level_4/lesson_27/content.md new file mode 100644 index 000000000..5f91df57e --- /dev/null +++ b/content/zh/learn/level_4/lesson_27/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 file 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_27/video.md b/content/zh/learn/level_4/lesson_27/video.md new file mode 100644 index 000000000..3a99c26bd --- /dev/null +++ b/content/zh/learn/level_4/lesson_27/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 file 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/103%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%20%20file%E4%B8%8A%E4%BA%91%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_28/_index.md b/content/zh/learn/level_4/lesson_28/_index.md new file mode 100644 index 000000000..bdf6900ff --- /dev/null +++ b/content/zh/learn/level_4/lesson_28/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 auth 服务 +weight: 28 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 auth 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_28/content.md b/content/zh/learn/level_4/lesson_28/content.md new file mode 100644 index 000000000..043056899 --- /dev/null +++ b/content/zh/learn/level_4/lesson_28/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 auth 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n 
+--- diff --git a/content/zh/learn/level_4/lesson_28/video.md b/content/zh/learn/level_4/lesson_28/video.md new file mode 100644 index 000000000..6c9f004e2 --- /dev/null +++ b/content/zh/learn/level_4/lesson_28/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 auth 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/104%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-auth%E4%B8%8A%E4%BA%91.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_29/_index.md b/content/zh/learn/level_4/lesson_29/_index.md new file mode 100644 index 000000000..e8b06328a --- /dev/null +++ b/content/zh/learn/level_4/lesson_29/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 Nacos 服务的存活探针(补充) +weight: 29 + +_build: + render: false + +profit: 了解 KubeSphere 下的微服务部署上云部署 Nacos 服务的存活探针(补充) +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_29/content.md b/content/zh/learn/level_4/lesson_29/content.md new file mode 100644 index 000000000..5e87d9b6e --- /dev/null +++ b/content/zh/learn/level_4/lesson_29/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 Nacos 服务的存活探针(补充) +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_29/video.md b/content/zh/learn/level_4/lesson_29/video.md new file mode 100644 index 000000000..832d3cecb --- /dev/null +++ b/content/zh/learn/level_4/lesson_29/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 Nacos 服务的存活探针(补充) +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/105-1%E3%80%81%EF%BC%88%E8%A1%A5%E5%85%85%EF%BC%89Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-nacos%E7%9A%84%E5%AD%98%E6%B4%BB%E6%8E%A2%E9%92%88.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_3/_index.md b/content/zh/learn/level_4/lesson_3/_index.md new file mode 100644 index 000000000..3a7bdcfd9 --- /dev/null +++ b/content/zh/learn/level_4/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 基础架构以及多租户实践 - 项目管理 +weight: 3 + +_build: + render: false + +profit: 了解 KubeSphere 的整体架构,以及项目管理相关功能 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_3/content.md b/content/zh/learn/level_4/lesson_3/content.md new file mode 100644 index 000000000..24404680e --- /dev/null +++ b/content/zh/learn/level_4/lesson_3/content.md @@ -0,0 +1,7 @@ +--- +title: 基础架构以及多租户实践 - 项目管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/dx5ao9 +--- diff --git a/content/zh/learn/level_4/lesson_3/video.md b/content/zh/learn/level_4/lesson_3/video.md new file mode 100644 index 000000000..ad97eab96 --- /dev/null +++ b/content/zh/learn/level_4/lesson_3/video.md @@ -0,0 +1,8 @@ +--- +title: 基础架构以及多租户实践 - 项目管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/79%E3%80%81KubeSphere-%E5%A4%9A%E7%A7%9F%E6%88%B7-pm-wang%E5%88%9B%E5%BB%BA%E9%A1%B9%E7%9B%AE%26%E9%82%80%E8%AF%B7%E7%94%A8%E6%88%B7%E8%BF%9B%E5%85%A5%E9%A1%B9%E7%9B%AE.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_30/_index.md b/content/zh/learn/level_4/lesson_30/_index.md new file mode 100644 index 000000000..8161bd132 --- /dev/null +++ b/content/zh/learn/level_4/lesson_30/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “若依” 微服务项目实战 - 上云部署 web 前端服务以及测试 +weight: 30 + +_build: + render: false + 
+profit: 了解 KubeSphere 下的微服务部署上云部署 web 前端服务以及测试 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_30/content.md b/content/zh/learn/level_4/lesson_30/content.md new file mode 100644 index 000000000..93b0aec4d --- /dev/null +++ b/content/zh/learn/level_4/lesson_30/content.md @@ -0,0 +1,7 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 web 前端服务以及测试 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/puhqea#fl62n +--- diff --git a/content/zh/learn/level_4/lesson_30/video.md b/content/zh/learn/level_4/lesson_30/video.md new file mode 100644 index 000000000..8ea6b1630 --- /dev/null +++ b/content/zh/learn/level_4/lesson_30/video.md @@ -0,0 +1,8 @@ +--- +title: “若依” 微服务项目实战 - 上云部署 web 前端服务以及测试 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/105%E3%80%81Kubernetes%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%9E%E6%88%98-Java%E5%BE%AE%E6%9C%8D%E5%8A%A1%E4%B8%8A%E4%BA%91-%E5%89%8D%E7%AB%AF%E4%B8%8A%E4%BA%91%26%E6%B5%8B%E8%AF%95.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_31/_index.md b/content/zh/learn/level_4/lesson_31/_index.md new file mode 100644 index 000000000..b25cec75d --- /dev/null +++ b/content/zh/learn/level_4/lesson_31/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 基础以及 DevOps 的落地 +weight: 31 + +_build: + render: false + +profit: 了解 DevOps 的概念,以及 KubeSphere 的 DevOps 落地实现 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_31/content.md b/content/zh/learn/level_4/lesson_31/content.md new file mode 100644 index 000000000..160f0096c --- /dev/null +++ b/content/zh/learn/level_4/lesson_31/content.md @@ -0,0 +1,7 @@ +--- +title: DevOps 基础以及 DevOps 的落地 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/qgnps0 +--- diff --git 
a/content/zh/learn/level_4/lesson_31/video.md b/content/zh/learn/level_4/lesson_31/video.md new file mode 100644 index 000000000..b8f37bbc0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_31/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 基础以及 DevOps 的落地 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/106%E3%80%81devops-%E4%BB%80%E4%B9%88%E6%98%AF%E7%9A%84devops%E4%BB%A5%E5%8F%8Adevops%E7%9A%84%E8%90%BD%E5%9C%B0.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_32/_index.md b/content/zh/learn/level_4/lesson_32/_index.md new file mode 100644 index 000000000..237bad3ce --- /dev/null +++ b/content/zh/learn/level_4/lesson_32/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目简介以及演示 +weight: 32 + +_build: + render: false + +profit: 尚医通微服务项目简介以及操作演示 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_32/content.md b/content/zh/learn/level_4/lesson_32/content.md new file mode 100644 index 000000000..ae4b9221f --- /dev/null +++ b/content/zh/learn/level_4/lesson_32/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目简介以及演示 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#QMVTD +--- diff --git a/content/zh/learn/level_4/lesson_32/video.md b/content/zh/learn/level_4/lesson_32/video.md new file mode 100644 index 000000000..d36ea74ae --- /dev/null +++ b/content/zh/learn/level_4/lesson_32/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目简介以及演示 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/107%E3%80%81devops-%E5%B0%9A%E5%8C%BB%E9%80%9A%E9%A1%B9%E7%9B%AE%E6%BC%94%E7%A4%BA.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_33/_index.md b/content/zh/learn/level_4/lesson_33/_index.md new file mode 100644 
index 000000000..b3bcc7955 --- /dev/null +++ b/content/zh/learn/level_4/lesson_33/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - Sentinel 服务部署 +weight: 33 + +_build: + render: false + +profit: 部署尚医通的中间件 Sentinel 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_33/content.md b/content/zh/learn/level_4/lesson_33/content.md new file mode 100644 index 000000000..007062f66 --- /dev/null +++ b/content/zh/learn/level_4/lesson_33/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Sentinel 服务部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#wVfb8 +--- diff --git a/content/zh/learn/level_4/lesson_33/video.md b/content/zh/learn/level_4/lesson_33/video.md new file mode 100644 index 000000000..a9a0b7b59 --- /dev/null +++ b/content/zh/learn/level_4/lesson_33/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Sentinel 服务部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/108%E3%80%81devops-%E5%B0%9A%E5%8C%BB%E9%80%9A-%E4%B8%AD%E9%97%B4%E4%BB%B6-sentinel%E9%83%A8%E7%BD%B2%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_34/_index.md b/content/zh/learn/level_4/lesson_34/_index.md new file mode 100644 index 000000000..81f0d7ebf --- /dev/null +++ b/content/zh/learn/level_4/lesson_34/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - MongoDB 服务部署 +weight: 34 + +_build: + render: false + +profit: 部署尚医通的中间件 MongoDB 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_34/content.md b/content/zh/learn/level_4/lesson_34/content.md new file mode 100644 index 000000000..8cbafa1f8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_34/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - MongoDB 服务部署 +keywords: Kubesphere, 
Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#wVfb8 +--- diff --git a/content/zh/learn/level_4/lesson_34/video.md b/content/zh/learn/level_4/lesson_34/video.md new file mode 100644 index 000000000..55841a00d --- /dev/null +++ b/content/zh/learn/level_4/lesson_34/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - MongoDB 服务部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/109%E3%80%81devops-%E5%B0%9A%E5%8C%BB%E9%80%9A-%E4%B8%AD%E9%97%B4%E4%BB%B6-mongo%E9%83%A8%E7%BD%B2%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_35/_index.md b/content/zh/learn/level_4/lesson_35/_index.md new file mode 100644 index 000000000..40bcc72e2 --- /dev/null +++ b/content/zh/learn/level_4/lesson_35/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - MySQL 数据初始化 +weight: 35 + +_build: + render: false + +profit: 部署尚医通的中间件 MySQL 数据初始化 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_35/content.md b/content/zh/learn/level_4/lesson_35/content.md new file mode 100644 index 000000000..5914d8f3e --- /dev/null +++ b/content/zh/learn/level_4/lesson_35/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - MySQL 数据初始化 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#wVfb8 +--- diff --git a/content/zh/learn/level_4/lesson_35/video.md b/content/zh/learn/level_4/lesson_35/video.md new file mode 100644 index 000000000..765f13901 --- /dev/null +++ b/content/zh/learn/level_4/lesson_35/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - MySQL 数据初始化 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/110%E3%80%81devops-%E5%B0%9A%E5%8C%BB%E9%80%9A-%E4%B8%AD%E9%97%B4%E4%BB%B6-mysql%E6%95%B0%E6%8D%AE%E5%88%9D%E5%A7%8B%E5%8C%96.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_36/_index.md b/content/zh/learn/level_4/lesson_36/_index.md new file mode 100644 index 000000000..8065adfe5 --- /dev/null +++ b/content/zh/learn/level_4/lesson_36/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - Nacos 配置提取 +weight: 36 + +_build: + render: false + +profit: 部署尚医通的中间件 Nacos 配置提取 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_36/content.md b/content/zh/learn/level_4/lesson_36/content.md new file mode 100644 index 000000000..653170e3e --- /dev/null +++ b/content/zh/learn/level_4/lesson_36/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Nacos 配置提取 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#wVfb8 +--- diff --git a/content/zh/learn/level_4/lesson_36/video.md b/content/zh/learn/level_4/lesson_36/video.md new file mode 100644 index 000000000..01a4f86ae --- /dev/null +++ b/content/zh/learn/level_4/lesson_36/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Nacos 配置提取 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/111%E3%80%81devops-%E5%B0%9A%E5%8C%BB%E9%80%9A-%E7%94%9F%E4%BA%A7%E7%8E%AF%E5%A2%83%E9%85%8D%E7%BD%AE%E6%8A%BD%E5%8F%96.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_37/_index.md b/content/zh/learn/level_4/lesson_37/_index.md new file mode 100644 index 000000000..a4a72a4b8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_37/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 创建可视化流水线工程 +weight: 37 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线的创建 
+time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_37/content.md b/content/zh/learn/level_4/lesson_37/content.md new file mode 100644 index 000000000..46c528ee3 --- /dev/null +++ b/content/zh/learn/level_4/lesson_37/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 创建可视化流水线工程 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_37/video.md b/content/zh/learn/level_4/lesson_37/video.md new file mode 100644 index 000000000..e21f78fc3 --- /dev/null +++ b/content/zh/learn/level_4/lesson_37/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 创建可视化流水线工程 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/112%E3%80%81devops-%E5%88%9B%E5%BB%BAdevops%E5%B7%A5%E7%A8%8B%26Jenkins%E5%8F%AF%E8%A7%86%E5%8C%96.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_38/_index.md b/content/zh/learn/level_4/lesson_38/_index.md new file mode 100644 index 000000000..b18d08cf0 --- /dev/null +++ b/content/zh/learn/level_4/lesson_38/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线拉取代码 +weight: 38 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线拉取代码的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_38/content.md b/content/zh/learn/level_4/lesson_38/content.md new file mode 100644 index 000000000..b842b94d5 --- /dev/null +++ b/content/zh/learn/level_4/lesson_38/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线拉取代码 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_38/video.md b/content/zh/learn/level_4/lesson_38/video.md new file mode 100644 index 000000000..b163300a0 --- 
/dev/null +++ b/content/zh/learn/level_4/lesson_38/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线拉取代码 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/113%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%B8%80%E6%AD%A5-%E6%8B%89%E5%8F%96%E4%BB%A3%E7%A0%81.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_39/_index.md b/content/zh/learn/level_4/lesson_39/_index.md new file mode 100644 index 000000000..fc99c901a --- /dev/null +++ b/content/zh/learn/level_4/lesson_39/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线项目编译 +weight: 39 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线项目编译的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_39/content.md b/content/zh/learn/level_4/lesson_39/content.md new file mode 100644 index 000000000..71fe0657f --- /dev/null +++ b/content/zh/learn/level_4/lesson_39/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线项目编译 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_39/video.md b/content/zh/learn/level_4/lesson_39/video.md new file mode 100644 index 000000000..8e523f708 --- /dev/null +++ b/content/zh/learn/level_4/lesson_39/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线项目编译 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/114%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%8C%E6%AD%A5-%E9%A1%B9%E7%9B%AE%E7%BC%96%E8%AF%91.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_4/_index.md b/content/zh/learn/level_4/lesson_4/_index.md new file mode 100644 index 000000000..ca5f25081 --- 
/dev/null +++ b/content/zh/learn/level_4/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 应用部署三要素 +weight: 4 + +_build: + render: false + +profit: 了解 KubeSphere 下部署应用的基础知识 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_4/content.md b/content/zh/learn/level_4/lesson_4/content.md new file mode 100644 index 000000000..50fdf725f --- /dev/null +++ b/content/zh/learn/level_4/lesson_4/content.md @@ -0,0 +1,7 @@ +--- +title: 应用部署三要素 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk +--- diff --git a/content/zh/learn/level_4/lesson_4/video.md b/content/zh/learn/level_4/lesson_4/video.md new file mode 100644 index 000000000..ddb4f3dcd --- /dev/null +++ b/content/zh/learn/level_4/lesson_4/video.md @@ -0,0 +1,8 @@ +--- +title: 应用部署三要素 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/80%E3%80%81KubeSphere%E7%BB%99Kubernetes%E4%B8%8A%E9%83%A8%E7%BD%B2%E4%B8%AD%E9%97%B4%E4%BB%B6-%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E4%B8%89%E8%A6%81%E7%B4%A0.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_40/_index.md b/content/zh/learn/level_4/lesson_40/_index.md new file mode 100644 index 000000000..c78f165c5 --- /dev/null +++ b/content/zh/learn/level_4/lesson_40/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线构建镜像的基本设置 +weight: 40 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线构建镜像的基本设置 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_40/content.md b/content/zh/learn/level_4/lesson_40/content.md new file mode 100644 index 000000000..3eb0797de --- /dev/null +++ b/content/zh/learn/level_4/lesson_40/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线构建镜像的基本设置 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_40/video.md b/content/zh/learn/level_4/lesson_40/video.md new file mode 100644 index 000000000..e6f226c8b --- /dev/null +++ b/content/zh/learn/level_4/lesson_40/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线构建镜像的基本设置 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/115%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%B8%89%E6%AD%A5-%E6%9E%84%E5%BB%BA%E9%95%9C%E5%83%8F-%E5%9F%BA%E6%9C%AC%E8%AE%BE%E7%BD%AE.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_41/_index.md b/content/zh/learn/level_4/lesson_41/_index.md new file mode 100644 index 000000000..07cd3ab68 --- /dev/null +++ b/content/zh/learn/level_4/lesson_41/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线并发构建镜像 +weight: 41 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线并发构建镜像的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_41/content.md b/content/zh/learn/level_4/lesson_41/content.md new file mode 100644 index 000000000..5ede84742 --- /dev/null +++ b/content/zh/learn/level_4/lesson_41/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线并发构建镜像 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_41/video.md b/content/zh/learn/level_4/lesson_41/video.md new file mode 100644 index 000000000..50eedda3d --- /dev/null +++ b/content/zh/learn/level_4/lesson_41/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线并发构建镜像 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/116%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%B8%89%E6%AD%A5-%E6%9E%84%E5%BB%BA%E9%95%9C%E5%83%8F-%E5%B9%B6%E5%8F%91%E6%9E%84%E5%BB%BA.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_42/_index.md b/content/zh/learn/level_4/lesson_42/_index.md new file mode 100644 index 000000000..879d69713 --- /dev/null +++ b/content/zh/learn/level_4/lesson_42/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线推送镜像的基本操作 +weight: 42 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线推送镜像的基本操作 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_42/content.md b/content/zh/learn/level_4/lesson_42/content.md new file mode 100644 index 000000000..a4f9ee15f --- /dev/null +++ b/content/zh/learn/level_4/lesson_42/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线推送镜像的基本操作 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_42/video.md b/content/zh/learn/level_4/lesson_42/video.md new file mode 100644 index 000000000..1217484cc --- /dev/null +++ b/content/zh/learn/level_4/lesson_42/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线推送镜像的基本操作 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/117%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E5%9B%9B%E6%AD%A5-%E6%8E%A8%E9%80%81%E9%95%9C%E5%83%8F-%E5%9F%BA%E7%A1%80%E6%93%8D%E4%BD%9C.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_43/_index.md b/content/zh/learn/level_4/lesson_43/_index.md new file mode 100644 index 000000000..6711c502a --- /dev/null +++ b/content/zh/learn/level_4/lesson_43/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 
流水线并发推送镜像 +weight: 43 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线并发推送镜像的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_43/content.md b/content/zh/learn/level_4/lesson_43/content.md new file mode 100644 index 000000000..d6b534328 --- /dev/null +++ b/content/zh/learn/level_4/lesson_43/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线并发推送镜像 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_43/video.md b/content/zh/learn/level_4/lesson_43/video.md new file mode 100644 index 000000000..b64f2584d --- /dev/null +++ b/content/zh/learn/level_4/lesson_43/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线并发推送镜像 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/118%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E5%9B%9B%E6%AD%A5-%E6%8E%A8%E9%80%81%E9%95%9C%E5%83%8F-%E5%B9%B6%E5%8F%91%E6%8E%A8%E9%80%81%E5%AE%8C%E6%88%90.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_44/_index.md b/content/zh/learn/level_4/lesson_44/_index.md new file mode 100644 index 000000000..154abf2c8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_44/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境 +weight: 44 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线部署到 dev 的过程 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_44/content.md b/content/zh/learn/level_4/lesson_44/content.md new file mode 100644 index 000000000..787ec8b09 --- /dev/null +++ b/content/zh/learn/level_4/lesson_44/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_44/video.md b/content/zh/learn/level_4/lesson_44/video.md new file mode 100644 index 000000000..00338f604 --- /dev/null +++ b/content/zh/learn/level_4/lesson_44/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/119%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%94%E6%AD%A5-%E9%83%A8%E7%BD%B2%E5%88%B0dev%E7%8E%AF%E5%A2%83.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_45/_index.md b/content/zh/learn/level_4/lesson_45/_index.md new file mode 100644 index 000000000..6882eb133 --- /dev/null +++ b/content/zh/learn/level_4/lesson_45/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境问题解决 +weight: 45 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线部署到 dev 的过程出现的问题 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_45/content.md b/content/zh/learn/level_4/lesson_45/content.md new file mode 100644 index 000000000..e122fe5ff --- /dev/null +++ b/content/zh/learn/level_4/lesson_45/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_45/video.md b/content/zh/learn/level_4/lesson_45/video.md new file mode 100644 index 000000000..8d0e2e30f --- /dev/null +++ b/content/zh/learn/level_4/lesson_45/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/120%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%94%E6%AD%A5-%E9%83%A8%E7%BD%B2dev%E5%90%84%E7%A7%8D%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_46/_index.md b/content/zh/learn/level_4/lesson_46/_index.md new file mode 100644 index 000000000..bc710a218 --- /dev/null +++ b/content/zh/learn/level_4/lesson_46/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境内存不足的问题解决 +weight: 46 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线部署到 dev 的过程出现的内存问题 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_46/content.md b/content/zh/learn/level_4/lesson_46/content.md new file mode 100644 index 000000000..ad9bdd264 --- /dev/null +++ b/content/zh/learn/level_4/lesson_46/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境内存不足的问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_46/video.md b/content/zh/learn/level_4/lesson_46/video.md new file mode 100644 index 000000000..7b95b2ea8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_46/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境内存不足的问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/121%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%94%E6%AD%A5-%E9%83%A8%E7%BD%B2%E5%86%85%E5%AD%98%E4%B8%8D%E8%B6%B3%E7%AD%89%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_47/_index.md b/content/zh/learn/level_4/lesson_47/_index.md new file mode 100644 index 000000000..e8790001f --- /dev/null +++ 
b/content/zh/learn/level_4/lesson_47/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境文件编码问题解决 +weight: 47 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线部署到 dev 的过程出现的文件编码问题 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_47/content.md b/content/zh/learn/level_4/lesson_47/content.md new file mode 100644 index 000000000..d7d0a0194 --- /dev/null +++ b/content/zh/learn/level_4/lesson_47/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境文件编码问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_47/video.md b/content/zh/learn/level_4/lesson_47/video.md new file mode 100644 index 000000000..29a4ae6f3 --- /dev/null +++ b/content/zh/learn/level_4/lesson_47/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境文件编码问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/122%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%94%E6%AD%A5-%E6%96%87%E4%BB%B6%E7%BC%96%E7%A0%81%E9%97%AE%E9%A2%98.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_48/_index.md b/content/zh/learn/level_4/lesson_48/_index.md new file mode 100644 index 000000000..295d930ec --- /dev/null +++ b/content/zh/learn/level_4/lesson_48/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境就绪探针问题解决 +weight: 48 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线部署到 dev 的过程出现的就绪探针问题 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_48/content.md b/content/zh/learn/level_4/lesson_48/content.md new file mode 100644 index 000000000..6f07d47e2 --- /dev/null +++ b/content/zh/learn/level_4/lesson_48/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 
微服务项目 DevOps 实战 - 流水线部署到 dev 环境就绪探针问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_48/video.md b/content/zh/learn/level_4/lesson_48/video.md new file mode 100644 index 000000000..da84162ec --- /dev/null +++ b/content/zh/learn/level_4/lesson_48/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线部署到 dev 环境就绪探针问题解决 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/123%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E4%BA%94%E6%AD%A5-%E5%B0%B1%E7%BB%AA%E6%8E%A2%E9%92%88%E9%97%AE%E9%A2%98.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_49/_index.md b/content/zh/learn/level_4/lesson_49/_index.md new file mode 100644 index 000000000..7cf3b225a --- /dev/null +++ b/content/zh/learn/level_4/lesson_49/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 流水线系统邮件功能 +weight: 49 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 流水线系统邮件功能 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_49/content.md b/content/zh/learn/level_4/lesson_49/content.md new file mode 100644 index 000000000..71f66b534 --- /dev/null +++ b/content/zh/learn/level_4/lesson_49/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线系统邮件功能 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_49/video.md b/content/zh/learn/level_4/lesson_49/video.md new file mode 100644 index 000000000..761b590fa --- /dev/null +++ b/content/zh/learn/level_4/lesson_49/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 流水线系统邮件功能 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/124%E3%80%81devops-%E5%8F%AF%E8%A7%86%E5%8C%96Pipeline-%E7%AC%AC%E5%85%AD%E6%AD%A5-%E7%B3%BB%E7%BB%9F%E9%82%AE%E4%BB%B6%E5%8A%9F%E8%83%BD.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_5/_index.md b/content/zh/learn/level_4/lesson_5/_index.md new file mode 100644 index 000000000..1dc646734 --- /dev/null +++ b/content/zh/learn/level_4/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 部署 MySQL 服务 +weight: 5 + +_build: + render: false + +profit: 了解 KubeSphere 下的 MySQL 服务部署 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_5/content.md b/content/zh/learn/level_4/lesson_5/content.md new file mode 100644 index 000000000..e0c388eab --- /dev/null +++ b/content/zh/learn/level_4/lesson_5/content.md @@ -0,0 +1,7 @@ +--- +title: 部署 MySQL 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#Zgaes +--- diff --git a/content/zh/learn/level_4/lesson_5/video.md b/content/zh/learn/level_4/lesson_5/video.md new file mode 100644 index 000000000..fb1b08acf --- /dev/null +++ b/content/zh/learn/level_4/lesson_5/video.md @@ -0,0 +1,8 @@ +--- +title: 部署 MySQL 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/81%E3%80%81KubeSphere%E7%BB%99Kubernetes%E4%B8%8A%E9%83%A8%E7%BD%B2%E4%B8%AD%E9%97%B4%E4%BB%B6-%E9%83%A8%E7%BD%B2MySQL%E6%9C%89%E7%8A%B6%E6%80%81%E5%89%AF%E6%9C%AC%E9%9B%86.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_50/_index.md b/content/zh/learn/level_4/lesson_50/_index.md new file mode 100644 index 000000000..7f91b7555 --- /dev/null +++ b/content/zh/learn/level_4/lesson_50/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 后端部署完成以及验证 +weight: 50 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 后端部署完成以及验证 
+time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_50/content.md b/content/zh/learn/level_4/lesson_50/content.md new file mode 100644 index 000000000..70d222cfa --- /dev/null +++ b/content/zh/learn/level_4/lesson_50/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 后端部署完成以及验证 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_50/video.md b/content/zh/learn/level_4/lesson_50/video.md new file mode 100644 index 000000000..3553bbbd9 --- /dev/null +++ b/content/zh/learn/level_4/lesson_50/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 后端部署完成以及验证 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/125%E3%80%81devops-%E5%90%8E%E5%8F%B0%E5%BA%94%E7%94%A8%E9%83%A8%E7%BD%B2%E5%AE%8C%E6%88%90%26%E6%95%B0%E6%8D%AE%E9%AA%8C%E8%AF%81.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_51/_index.md b/content/zh/learn/level_4/lesson_51/_index.md new file mode 100644 index 000000000..e5115f93f --- /dev/null +++ b/content/zh/learn/level_4/lesson_51/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - vue-admin 前端项目部署 +weight: 51 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 下 vue-admin 前端项目部署 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_51/content.md b/content/zh/learn/level_4/lesson_51/content.md new file mode 100644 index 000000000..ae1e1fbcb --- /dev/null +++ b/content/zh/learn/level_4/lesson_51/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - vue-admin 前端项目部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_51/video.md 
b/content/zh/learn/level_4/lesson_51/video.md new file mode 100644 index 000000000..174c09b15 --- /dev/null +++ b/content/zh/learn/level_4/lesson_51/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - vue-admin 前端项目部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/126%E3%80%81devops-Jenkinsfile-%E9%83%A8%E7%BD%B2vue-admin%E9%A1%B9%E7%9B%AE.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_52/_index.md b/content/zh/learn/level_4/lesson_52/_index.md new file mode 100644 index 000000000..e94e33d4f --- /dev/null +++ b/content/zh/learn/level_4/lesson_52/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - 最后优化 +weight: 52 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 下部署完最后优化 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_52/content.md b/content/zh/learn/level_4/lesson_52/content.md new file mode 100644 index 000000000..a89087570 --- /dev/null +++ b/content/zh/learn/level_4/lesson_52/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 最后优化 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_52/video.md b/content/zh/learn/level_4/lesson_52/video.md new file mode 100644 index 000000000..443c7ef48 --- /dev/null +++ b/content/zh/learn/level_4/lesson_52/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - 最后优化 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/127%E3%80%81devops-%E6%9C%80%E5%90%8E%E7%9A%84%E9%83%A8%E7%BD%B2%E4%BC%98%E5%8C%96.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_53/_index.md b/content/zh/learn/level_4/lesson_53/_index.md new file mode 100644 index 
000000000..32ae05da1 --- /dev/null +++ b/content/zh/learn/level_4/lesson_53/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: “尚医通” 微服务项目 DevOps 实战 - Webhook 自启动流水线 +weight: 53 + +_build: + render: false + +profit: 了解 KubeSphere 的 DevOps 下 Webhook 自启动流水线 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_53/content.md b/content/zh/learn/level_4/lesson_53/content.md new file mode 100644 index 000000000..046ab1d1d --- /dev/null +++ b/content/zh/learn/level_4/lesson_53/content.md @@ -0,0 +1,7 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Webhook 自启动流水线 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_53/video.md b/content/zh/learn/level_4/lesson_53/video.md new file mode 100644 index 000000000..8a6a8f1f8 --- /dev/null +++ b/content/zh/learn/level_4/lesson_53/video.md @@ -0,0 +1,8 @@ +--- +title: “尚医通” 微服务项目 DevOps 实战 - Webhook 自启动流水线 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/128%E3%80%81devops-webhook%E8%87%AA%E5%8A%A8%E5%90%AF%E5%8A%A8%E6%B5%81%E6%B0%B4%E7%BA%BF%E8%BF%9B%E8%A1%8C%E6%9E%84%E5%BB%BA.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_54/_index.md b/content/zh/learn/level_4/lesson_54/_index.md new file mode 100644 index 000000000..7df887af7 --- /dev/null +++ b/content/zh/learn/level_4/lesson_54/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 第一期结束小结 +weight: 54 + +_build: + render: false + +profit: 总结 KubeSphere 的课程包含内容 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_54/content.md b/content/zh/learn/level_4/lesson_54/content.md new file mode 100644 index 000000000..dd808d2fb --- /dev/null +++ b/content/zh/learn/level_4/lesson_54/content.md @@ -0,0 +1,7 @@ +--- +title: 第一期结束小结 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + 
+pdfUrl: https://www.yuque.com/leifengyang/oncloud/bp7pnm#ZDPfS +--- diff --git a/content/zh/learn/level_4/lesson_54/video.md b/content/zh/learn/level_4/lesson_54/video.md new file mode 100644 index 000000000..7013a1abc --- /dev/null +++ b/content/zh/learn/level_4/lesson_54/video.md @@ -0,0 +1,8 @@ +--- +title: 第一期结束小结 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/129%E3%80%81%E5%B0%8F%E7%BB%93-%E5%90%8E%E4%BC%9A%E6%9C%89%E6%9C%9F.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_6/_index.md b/content/zh/learn/level_4/lesson_6/_index.md new file mode 100644 index 000000000..b1fc4e069 --- /dev/null +++ b/content/zh/learn/level_4/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 设置 MySQL 的负载均衡网络 +weight: 6 + +_build: + render: false + +profit: 了解 KubeSphere 下的负载均衡网络以及配置 MySQL 的负载均衡网络 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_6/content.md b/content/zh/learn/level_4/lesson_6/content.md new file mode 100644 index 000000000..c9986fc35 --- /dev/null +++ b/content/zh/learn/level_4/lesson_6/content.md @@ -0,0 +1,7 @@ +--- +title: 设置 MySQL 的负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#Zgaes +--- diff --git a/content/zh/learn/level_4/lesson_6/video.md b/content/zh/learn/level_4/lesson_6/video.md new file mode 100644 index 000000000..7a4ed7f73 --- /dev/null +++ b/content/zh/learn/level_4/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: 设置 MySQL 的负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/82%E3%80%81KubeSphere%E7%BB%99Kubernetes%E4%B8%8A%E9%83%A8%E7%BD%B2%E4%B8%AD%E9%97%B4%E4%BB%B6-%E9%83%A8%E7%BD%B2MySQL%E8%B4%9F%E8%BD%BD%E5%9D%87%E8%A1%A1%E7%BD%91%E7%BB%9C.mp4 +--- diff 
--git a/content/zh/learn/level_4/lesson_7/_index.md b/content/zh/learn/level_4/lesson_7/_index.md new file mode 100644 index 000000000..da0e638ee --- /dev/null +++ b/content/zh/learn/level_4/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 部署 Redis 服务以及负载均衡网络 +weight: 7 + +_build: + render: false + +profit: 了解 KubeSphere 下的 Redis 的部署和负载均衡网络配置 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_7/content.md b/content/zh/learn/level_4/lesson_7/content.md new file mode 100644 index 000000000..f9973b085 --- /dev/null +++ b/content/zh/learn/level_4/lesson_7/content.md @@ -0,0 +1,7 @@ +--- +title: 部署 Redis 服务以及负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#O8sm1 +--- diff --git a/content/zh/learn/level_4/lesson_7/video.md b/content/zh/learn/level_4/lesson_7/video.md new file mode 100644 index 000000000..d19c13661 --- /dev/null +++ b/content/zh/learn/level_4/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: 部署 Redis 服务以及负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/83%E3%80%81KubeSphere%E7%BB%99Kubernetes%E4%B8%8A%E9%83%A8%E7%BD%B2%E4%B8%AD%E9%97%B4%E4%BB%B6-%E9%83%A8%E7%BD%B2Redis%26%E8%AE%BE%E7%BD%AE%E7%BD%91%E7%BB%9C.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_8/_index.md b/content/zh/learn/level_4/lesson_8/_index.md new file mode 100644 index 000000000..0d03dcda1 --- /dev/null +++ b/content/zh/learn/level_4/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 部署 ElasticSearch 服务以及负载均衡网络 +weight: 8 + +_build: + render: false + +profit: 了解 KubeSphere 下的 ElasticSearch 的部署和负载均衡网络配置 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_8/content.md b/content/zh/learn/level_4/lesson_8/content.md new file mode 100644 index 000000000..b326c0038 --- /dev/null +++
b/content/zh/learn/level_4/lesson_8/content.md @@ -0,0 +1,7 @@ +--- +title: 部署 ElasticSearch 服务以及负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#lYNS4 +--- diff --git a/content/zh/learn/level_4/lesson_8/video.md b/content/zh/learn/level_4/lesson_8/video.md new file mode 100644 index 000000000..8b1739f0a --- /dev/null +++ b/content/zh/learn/level_4/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: 部署 ElasticSearch 服务以及负载均衡网络 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/84%E3%80%81KubeSphere%E7%BB%99Kubernetes%E4%B8%8A%E9%83%A8%E7%BD%B2%E4%B8%AD%E9%97%B4%E4%BB%B6-%E9%83%A8%E7%BD%B2ElasticSearch.mp4 +--- diff --git a/content/zh/learn/level_4/lesson_9/_index.md b/content/zh/learn/level_4/lesson_9/_index.md new file mode 100644 index 000000000..7b6a1f1bb --- /dev/null +++ b/content/zh/learn/level_4/lesson_9/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 通过应用商店部署 RabbitMQ 服务 +weight: 9 + +_build: + render: false + +profit: 了解 KubeSphere 下的应用商店的使用并且部署 RabbitMQ 服务 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_4/lesson_9/content.md b/content/zh/learn/level_4/lesson_9/content.md new file mode 100644 index 000000000..d6ba7397c --- /dev/null +++ b/content/zh/learn/level_4/lesson_9/content.md @@ -0,0 +1,7 @@ +--- +title: 通过应用商店部署 RabbitMQ 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://www.yuque.com/leifengyang/oncloud/vgf9wk#J49Ly +--- diff --git a/content/zh/learn/level_4/lesson_9/video.md b/content/zh/learn/level_4/lesson_9/video.md new file mode 100644 index 000000000..7bd906136 --- /dev/null +++ b/content/zh/learn/level_4/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: 通过应用商店部署 RabbitMQ 服务 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/85%E3%80%81KubeSphere-%E5%BA%94%E7%94%A8%E5%95%86%E5%BA%97-%E9%83%A8%E7%BD%B2RabbitMQ.mp4 +--- diff --git a/content/zh/learn/level_5/_index.md b/content/zh/learn/level_5/_index.md new file mode 100644 index 000000000..ae170fc3e --- /dev/null +++ b/content/zh/learn/level_5/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第五章:使用 KubeKey 部署与运维 K8s 与 KubeSphere 集群 +weight: 5 + +_build: + render: false +--- diff --git a/content/zh/learn/level_5/lesson_1/_index.md b/content/zh/learn/level_5/lesson_1/_index.md new file mode 100644 index 000000000..e0c615c68 --- /dev/null +++ b/content/zh/learn/level_5/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 课程简介 +weight: 1 + +_build: + render: false + +profit: 从整体了解课程大纲 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_1/content.md b/content/zh/learn/level_5/lesson_1/content.md new file mode 100644 index 000000000..0e9adb3ed --- /dev/null +++ b/content/zh/learn/level_5/lesson_1/content.md @@ -0,0 +1,7 @@ +--- +title: 课程简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: +--- diff --git a/content/zh/learn/level_5/lesson_1/video.md b/content/zh/learn/level_5/lesson_1/video.md new file mode 100644 index 000000000..afd76daf0 --- /dev/null +++ b/content/zh/learn/level_5/lesson_1/video.md @@ -0,0 +1,8 @@ +--- +title: 课程简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/00%E3%80%81%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98-%E7%AC%AC%E4%BA%8C%E5%AD%A3-%E8%AF%BE%E7%A8%8B%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_2/_index.md b/content/zh/learn/level_5/lesson_2/_index.md new file mode 100644 index 000000000..058d00ada --- /dev/null +++ b/content/zh/learn/level_5/lesson_2/_index.md @@ -0,0 +1,10 @@ 
+--- +linkTitle: KubeKey 实践-KubeKey 简介 +weight: 2 + +_build: + render: false + +profit: 了解 KubeKey 的功能及使用方式 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_2/content.md b/content/zh/learn/level_5/lesson_2/content.md new file mode 100644 index 000000000..b3bf73c78 --- /dev/null +++ b/content/zh/learn/level_5/lesson_2/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-KubeKey 简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeKey%20%E5%AE%9E%E8%B7%B5-KubeKey%20%E7%AE%80%E4%BB%8B.pdf +--- diff --git a/content/zh/learn/level_5/lesson_2/video.md b/content/zh/learn/level_5/lesson_2/video.md new file mode 100644 index 000000000..3be35f813 --- /dev/null +++ b/content/zh/learn/level_5/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-KubeKey 简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/01%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-KubeKey%20%E7%AE%80%E4%BB%8B.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_3/_index.md b/content/zh/learn/level_5/lesson_3/_index.md new file mode 100644 index 000000000..8d45cdc16 --- /dev/null +++ b/content/zh/learn/level_5/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-部署高可用 Kubernetes 集群 +weight: 3 + +_build: + render: false + +profit: 了解如何使用 KubeKey 部署高可用集群 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_3/content.md b/content/zh/learn/level_5/lesson_3/content.md new file mode 100644 index 000000000..66ae60eb6 --- /dev/null +++ b/content/zh/learn/level_5/lesson_3/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-部署高可用 Kubernetes 集群 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeKey%20%E5%AE%9E%E8%B7%B5-%E9%83%A8%E7%BD%B2%E9%AB%98%E5%8F%AF%E7%94%A8%20Kubernetes%20%E9%9B%86%E7%BE%A4.pdf +--- diff --git a/content/zh/learn/level_5/lesson_3/video.md b/content/zh/learn/level_5/lesson_3/video.md new file mode 100644 index 000000000..c7b630b7c --- /dev/null +++ b/content/zh/learn/level_5/lesson_3/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-部署高可用 Kubernetes 集群 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/02%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-%E9%83%A8%E7%BD%B2%E9%AB%98%E5%8F%AF%E7%94%A8%20Kubernetes%20%E9%9B%86%E7%BE%A4.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_4/_index.md b/content/zh/learn/level_5/lesson_4/_index.md new file mode 100644 index 000000000..beb44cab8 --- /dev/null +++ b/content/zh/learn/level_5/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-KubeKey 集群配置文件详解 +weight: 4 + +_build: + render: false + +profit: 了解 KubeKey 的集群配置文件 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_4/content.md b/content/zh/learn/level_5/lesson_4/content.md new file mode 100644 index 000000000..56f528e14 --- /dev/null +++ b/content/zh/learn/level_5/lesson_4/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-KubeKey 集群配置文件详解 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeKey%20%E5%AE%9E%E8%B7%B5-KubeKey%20%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E8%AF%A6%E8%A7%A3.pdf +--- diff --git a/content/zh/learn/level_5/lesson_4/video.md b/content/zh/learn/level_5/lesson_4/video.md new file mode 100644 index 000000000..7e33bdbd1 --- /dev/null +++ b/content/zh/learn/level_5/lesson_4/video.md @@ 
-0,0 +1,8 @@ +--- +title: KubeKey 实践-KubeKey 集群配置文件详解 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/03%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-KubeKey%20%E9%9B%86%E7%BE%A4%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E8%AF%A6%E8%A7%A3.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_5/_index.md b/content/zh/learn/level_5/lesson_5/_index.md new file mode 100644 index 000000000..049303518 --- /dev/null +++ b/content/zh/learn/level_5/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-Kubernetes 增删集群节点 +weight: 5 + +_build: + render: false + +profit: 了解使用 KubeKey 增删集群节点的操作步骤 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_5/content.md b/content/zh/learn/level_5/lesson_5/content.md new file mode 100644 index 000000000..c6d40fd0c --- /dev/null +++ b/content/zh/learn/level_5/lesson_5/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-Kubernetes 增删集群节点 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeKey%20%E5%AE%9E%E8%B7%B5-Kubernetes%20%E5%A2%9E%E5%88%A0%E9%9B%86%E7%BE%A4%E8%8A%82%E7%82%B9.pdf +--- diff --git a/content/zh/learn/level_5/lesson_5/video.md b/content/zh/learn/level_5/lesson_5/video.md new file mode 100644 index 000000000..1e08a771b --- /dev/null +++ b/content/zh/learn/level_5/lesson_5/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-Kubernetes 增删集群节点 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/04%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-Kubernetes%20%E5%A2%9E%E5%88%A0%E9%9B%86%E7%BE%A4%E8%8A%82%E7%82%B9.mp4 +--- diff --git
a/content/zh/learn/level_5/lesson_6/_index.md b/content/zh/learn/level_5/lesson_6/_index.md new file mode 100644 index 000000000..a11ecf4e6 --- /dev/null +++ b/content/zh/learn/level_5/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-Kubernetes 集群证书管理 +weight: 6 + +_build: + render: false + +profit: 了解使用 KubeKey 管理集群证书 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_6/content.md b/content/zh/learn/level_5/lesson_6/content.md new file mode 100644 index 000000000..e52726bee --- /dev/null +++ b/content/zh/learn/level_5/lesson_6/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-Kubernetes 集群证书管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeKey%20%E5%AE%9E%E8%B7%B5-Kubernetes%20%E9%9B%86%E7%BE%A4%E8%AF%81%E4%B9%A6%E7%AE%A1%E7%90%86.pdf +--- diff --git a/content/zh/learn/level_5/lesson_6/video.md b/content/zh/learn/level_5/lesson_6/video.md new file mode 100644 index 000000000..46db43e38 --- /dev/null +++ b/content/zh/learn/level_5/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-Kubernetes 集群证书管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/05%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-Kubernetes%20%E9%9B%86%E7%BE%A4%E8%AF%81%E4%B9%A6%E7%AE%A1%E7%90%86.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_7/_index.md b/content/zh/learn/level_5/lesson_7/_index.md new file mode 100644 index 000000000..75523c402 --- /dev/null +++ b/content/zh/learn/level_5/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-KubeSphere 启用可插拔组件 +weight: 7 + +_build: + render: false + +profit: 了解如何在 KubeSphere 中启用可插拔组件 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_7/content.md 
b/content/zh/learn/level_5/lesson_7/content.md new file mode 100644 index 000000000..50c135f6c --- /dev/null +++ b/content/zh/learn/level_5/lesson_7/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-KubeSphere 启用可插拔组件 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: +--- diff --git a/content/zh/learn/level_5/lesson_7/video.md b/content/zh/learn/level_5/lesson_7/video.md new file mode 100644 index 000000000..ab48f9321 --- /dev/null +++ b/content/zh/learn/level_5/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-KubeSphere 启用可插拔组件 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/06%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-KubeSphere%20%E5%90%AF%E7%94%A8%E5%8F%AF%E6%8F%92%E6%8B%94%E7%BB%84%E4%BB%B6.mp4 +--- diff --git a/content/zh/learn/level_5/lesson_8/_index.md b/content/zh/learn/level_5/lesson_8/_index.md new file mode 100644 index 000000000..042fc4f22 --- /dev/null +++ b/content/zh/learn/level_5/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeKey 实践-Kubernetes 节点管理 +weight: 8 + +_build: + render: false + +profit: 了解集群管理员可查看的集群节点信息和可执行的操作 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_5/lesson_8/content.md b/content/zh/learn/level_5/lesson_8/content.md new file mode 100644 index 000000000..95b404352 --- /dev/null +++ b/content/zh/learn/level_5/lesson_8/content.md @@ -0,0 +1,7 @@ +--- +title: KubeKey 实践-Kubernetes 节点管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: +--- diff --git a/content/zh/learn/level_5/lesson_8/video.md b/content/zh/learn/level_5/lesson_8/video.md new file mode 100644 index 000000000..ddd7bb6fe --- /dev/null +++ b/content/zh/learn/level_5/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: KubeKey 实践-Kubernetes 节点管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + 
videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/07%E3%80%81KubeKey%20%E5%AE%9E%E8%B7%B5-Kubernetes%20%E8%8A%82%E7%82%B9%E7%AE%A1%E7%90%86.mp4 +--- diff --git a/content/zh/learn/level_6/_index.md b/content/zh/learn/level_6/_index.md new file mode 100644 index 000000000..639fd590b --- /dev/null +++ b/content/zh/learn/level_6/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第六章:DevOps 基础与实践 +weight: 6 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_1/_index.md b/content/zh/learn/level_6/lesson_1/_index.md new file mode 100644 index 000000000..bc23e1f54 --- /dev/null +++ b/content/zh/learn/level_6/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 基础-Git 版本控制与基本概念 +weight: 1 + +_build: + render: false + +profit: 了解什么是版本控制,Git 是什么 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_1/content.md b/content/zh/learn/level_6/lesson_1/content.md new file mode 100644 index 000000000..4b4a61ab9 --- /dev/null +++ b/content/zh/learn/level_6/lesson_1/content.md @@ -0,0 +1,9 @@ +--- +title: DevOps 基础-Git 版本控制与基本概念 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%9F%BA%E7%A1%80-Git%20%E7%89%88%E6%9C%AC%E6%8E%A7%E5%88%B6%E4%B8%8E%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5.pdf + +--- + diff --git a/content/zh/learn/level_6/lesson_1/video.md b/content/zh/learn/level_6/lesson_1/video.md new file mode 100644 index 000000000..31da72370 --- /dev/null +++ b/content/zh/learn/level_6/lesson_1/video.md @@ -0,0 +1,9 @@ +--- +title: DevOps 基础-Git 版本控制与基本概念 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/08%E3%80%81DevOps%20%E5%9F%BA%E7%A1%80-Git%20%E7%89%88%E6%9C%AC%E6%8E%A7%E5%88%B6%E4%B8%8E%E5%9F%BA%E6%9C%AC%E6%A6%82%E5%BF%B5.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_10/_index.md b/content/zh/learn/level_6/lesson_10/_index.md new file mode 100644 index 000000000..a8a501402 --- /dev/null +++ b/content/zh/learn/level_6/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-使用 Binary-to-Image 发布应用 +weight: 10 + +_build: + render: false + +profit: B2I 实操演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_10/content.md b/content/zh/learn/level_6/lesson_10/content.md new file mode 100644 index 000000000..ee7b3f9c5 --- /dev/null +++ b/content/zh/learn/level_6/lesson_10/content.md @@ -0,0 +1,9 @@ +--- +title: DevOps 实践-使用 Binary-to-Image 发布应用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: + +--- + diff --git a/content/zh/learn/level_6/lesson_10/video.md b/content/zh/learn/level_6/lesson_10/video.md new file mode 100644 index 000000000..54343f160 --- /dev/null +++ b/content/zh/learn/level_6/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-使用 Binary-to-Image 发布应用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/17%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-%E4%BD%BF%E7%94%A8%20Binary-to-Image%20%E5%8F%91%E5%B8%83%E5%BA%94%E7%94%A8.mp4 +--- diff --git a/content/zh/learn/level_6/lesson_11/_index.md b/content/zh/learn/level_6/lesson_11/_index.md new file mode 100644 index 000000000..545f6e3bb --- /dev/null +++ b/content/zh/learn/level_6/lesson_11/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-使用 Jenkinsfile 创建流水线 +weight: 11 + +_build: + 
render: false + +profit: 使用 Jenkinsfile 创建流水线实操演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_11/content.md b/content/zh/learn/level_6/lesson_11/content.md new file mode 100644 index 000000000..5af2471d6 --- /dev/null +++ b/content/zh/learn/level_6/lesson_11/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-使用 Jenkinsfile 创建流水线 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: +--- + diff --git a/content/zh/learn/level_6/lesson_11/video.md b/content/zh/learn/level_6/lesson_11/video.md new file mode 100644 index 000000000..511314719 --- /dev/null +++ b/content/zh/learn/level_6/lesson_11/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-使用 Jenkinsfile 创建流水线 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/18%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-%E4%BD%BF%E7%94%A8%20Jenkinsfile%20%E5%88%9B%E5%BB%BA%E6%B5%81%E6%B0%B4%E7%BA%BF.mp4 +--- diff --git a/content/zh/learn/level_6/lesson_2/_index.md b/content/zh/learn/level_6/lesson_2/_index.md new file mode 100644 index 000000000..70564091a --- /dev/null +++ b/content/zh/learn/level_6/lesson_2/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 基础-Git 基本命令使用示例 +weight: 2 + +_build: + render: false + +profit: 学会 Git 的基本命令使用 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_2/content.md b/content/zh/learn/level_6/lesson_2/content.md new file mode 100644 index 000000000..411fe0141 --- /dev/null +++ b/content/zh/learn/level_6/lesson_2/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 基础-Git 基本命令使用示例 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf +--- + diff --git a/content/zh/learn/level_6/lesson_2/video.md b/content/zh/learn/level_6/lesson_2/video.md new file mode 100644 index 000000000..b89eaf9a7 --- /dev/null +++ b/content/zh/learn/level_6/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 基础-Git 基本命令使用示例 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/09%E3%80%81DevOps%20%E5%9F%BA%E7%A1%80-Git%20%E5%9F%BA%E6%9C%AC%E5%91%BD%E4%BB%A4%E4%BD%BF%E7%94%A8%E7%A4%BA%E4%BE%8B.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_3/_index.md b/content/zh/learn/level_6/lesson_3/_index.md new file mode 100644 index 000000000..24aa144a8 --- /dev/null +++ b/content/zh/learn/level_6/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 基础-DevOps 元素周期表 +weight: 3 + +_build: + render: false + +profit: 了解 DevOps 及 DevOps 元素周期表 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_3/content.md b/content/zh/learn/level_6/lesson_3/content.md new file mode 100644 index 000000000..b2bd7df5b --- /dev/null +++ b/content/zh/learn/level_6/lesson_3/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 基础-DevOps 元素周期表 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://kubesphere-community.pek3b.qingstor.com/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%85%A5%E9%97%A8%E5%AE%9E%E6%88%98%20-%20DevOps%20%E5%85%83%E7%B4%A0%E5%91%A8%E6%9C%9F%E8%A1%A8.pdf + +--- \ No newline at end of file diff --git 
a/content/zh/learn/level_6/lesson_3/video.md b/content/zh/learn/level_6/lesson_3/video.md new file mode 100644 index 000000000..4d23ec76a --- /dev/null +++ b/content/zh/learn/level_6/lesson_3/video.md @@ -0,0 +1,9 @@ +--- +title: DevOps 基础-DevOps 元素周期表 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/10%E3%80%81DevOps%20%E5%9F%BA%E7%A1%80-DevOps%20%E5%85%83%E7%B4%A0%E5%91%A8%E6%9C%9F%E8%A1%A8.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_4/_index.md b/content/zh/learn/level_6/lesson_4/_index.md new file mode 100644 index 000000000..a21cb3779 --- /dev/null +++ b/content/zh/learn/level_6/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-DevOps 生命周期 +weight: 4 + +_build: + render: false + +profit: 了解 DevOps 的生命周期 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_4/content.md b/content/zh/learn/level_6/lesson_4/content.md new file mode 100644 index 000000000..fcb24d4ba --- /dev/null +++ b/content/zh/learn/level_6/lesson_4/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-DevOps 生命周期 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf + +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_4/video.md b/content/zh/learn/level_6/lesson_4/video.md new file mode 100644 index 000000000..f593dfeb5 --- /dev/null +++ b/content/zh/learn/level_6/lesson_4/video.md @@ -0,0 +1,9 @@ +--- +title: DevOps 实践-DevOps 生命周期 +keywords: Kubesphere, Kubesphere learn +description: 
Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/11%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-DevOps%20%E7%94%9F%E5%91%BD%E5%91%A8%E6%9C%9F.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_5/_index.md b/content/zh/learn/level_6/lesson_5/_index.md new file mode 100644 index 000000000..13302e6c8 --- /dev/null +++ b/content/zh/learn/level_6/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-DevOps 流水线准备 +weight: 5 + +_build: + render: false + +profit: 了解如何将 SonarQube 集成到流水线 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_5/content.md b/content/zh/learn/level_6/lesson_5/content.md new file mode 100644 index 000000000..ae5034855 --- /dev/null +++ b/content/zh/learn/level_6/lesson_5/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-DevOps 流水线准备 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf + +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_5/video.md b/content/zh/learn/level_6/lesson_5/video.md new file mode 100644 index 000000000..c38e1d55e --- /dev/null +++ b/content/zh/learn/level_6/lesson_5/video.md @@ -0,0 +1,9 @@ +--- +title: DevOps 实践-DevOps 流水线准备 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/12%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-DevOps%20%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%87%86%E5%A4%87.mp4 + +--- \ No newline at end of file diff --git 
a/content/zh/learn/level_6/lesson_6/_index.md b/content/zh/learn/level_6/lesson_6/_index.md new file mode 100644 index 000000000..3406e1b1d --- /dev/null +++ b/content/zh/learn/level_6/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-质量扫描-单元测试-制品管理 +weight: 6 + +_build: + render: false + +profit: 质量扫描、单元测试、制品管理操作演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_6/content.md b/content/zh/learn/level_6/lesson_6/content.md new file mode 100644 index 000000000..ccd7d13b9 --- /dev/null +++ b/content/zh/learn/level_6/lesson_6/content.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-质量扫描-单元测试-制品管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf +--- + diff --git a/content/zh/learn/level_6/lesson_6/video.md b/content/zh/learn/level_6/lesson_6/video.md new file mode 100644 index 000000000..44c7512ea --- /dev/null +++ b/content/zh/learn/level_6/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-质量扫描-单元测试-制品管理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/13%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-%E8%B4%A8%E9%87%8F%E6%89%AB%E6%8F%8F-%E5%8D%95%E5%85%83%E6%B5%8B%E8%AF%95-%E5%88%B6%E5%93%81%E7%AE%A1%E7%90%86.mp4 +--- diff --git a/content/zh/learn/level_6/lesson_7/_index.md b/content/zh/learn/level_6/lesson_7/_index.md new file mode 100644 index 000000000..938a2f45d --- /dev/null +++ b/content/zh/learn/level_6/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-DevOps 自动测试构建 +weight: 7 + +_build: + render: 
false + +profit: DevOps 自动测试构建实操演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_7/content.md b/content/zh/learn/level_6/lesson_7/content.md new file mode 100644 index 000000000..8ec27f8f6 --- /dev/null +++ b/content/zh/learn/level_6/lesson_7/content.md @@ -0,0 +1,9 @@ +--- +title: DevOps 实践-DevOps 自动测试构建 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf + +--- + diff --git a/content/zh/learn/level_6/lesson_7/video.md b/content/zh/learn/level_6/lesson_7/video.md new file mode 100644 index 000000000..edc0c02fa --- /dev/null +++ b/content/zh/learn/level_6/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-DevOps 自动测试构建 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/14%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-DevOps%20%E8%87%AA%E5%8A%A8%E6%B5%8B%E8%AF%95%E6%9E%84%E5%BB%BA.mp4 +--- diff --git a/content/zh/learn/level_6/lesson_8/_index.md b/content/zh/learn/level_6/lesson_8/_index.md new file mode 100644 index 000000000..bdf3ece76 --- /dev/null +++ b/content/zh/learn/level_6/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-DevOps 自动部署 +weight: 8 + +_build: + render: false + +profit: DevOps 自动部署实操演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_8/content.md b/content/zh/learn/level_6/lesson_8/content.md new file mode 100644 index 000000000..f46078d2e --- /dev/null +++ b/content/zh/learn/level_6/lesson_8/content.md @@ -0,0 +1,9 @@ +--- +title: 
DevOps 实践-DevOps 自动部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/DevOps%20%E5%AE%9E%E6%88%98-%E6%8C%81%E7%BB%AD%E9%9B%86%E6%88%90%E6%8C%81%E7%BB%AD%E4%BA%A4%E4%BB%98%E5%85%A8%E6%B5%81%E7%A8%8B%E6%B5%81%E6%B0%B4%E7%BA%BF%E5%B7%A5%E5%85%B7%E7%9A%84%E8%90%BD%E5%9C%B0.pdf + +--- + diff --git a/content/zh/learn/level_6/lesson_8/video.md b/content/zh/learn/level_6/lesson_8/video.md new file mode 100644 index 000000000..8e4248ca6 --- /dev/null +++ b/content/zh/learn/level_6/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 实践-DevOps 自动部署 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/15%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-DevOps%20%E8%87%AA%E5%8A%A8%E9%83%A8%E7%BD%B2.mp4 +--- diff --git a/content/zh/learn/level_6/lesson_9/_index.md b/content/zh/learn/level_6/lesson_9/_index.md new file mode 100644 index 000000000..50ef81fa7 --- /dev/null +++ b/content/zh/learn/level_6/lesson_9/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: DevOps 实践-使用 Source-to-Image 发布应用 +weight: 9 + +_build: + render: false + +profit: S2I 实操演示 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_6/lesson_9/content.md b/content/zh/learn/level_6/lesson_9/content.md new file mode 100644 index 000000000..8058457c8 --- /dev/null +++ b/content/zh/learn/level_6/lesson_9/content.md @@ -0,0 +1,9 @@ +--- +title: DevOps 实践-使用 Source-to-Image 发布应用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: + +--- + diff --git a/content/zh/learn/level_6/lesson_9/video.md b/content/zh/learn/level_6/lesson_9/video.md new file mode 100644 index 000000000..1d85a5f9f --- /dev/null +++ b/content/zh/learn/level_6/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: DevOps 
实践-使用 Source-to-Image 发布应用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/16%E3%80%81DevOps%20%E5%AE%9E%E8%B7%B5-%E4%BD%BF%E7%94%A8%20Source-to-Image%20%E5%8F%91%E5%B8%83%E5%BA%94%E7%94%A8.mp4 +--- diff --git a/content/zh/learn/level_7/_index.md b/content/zh/learn/level_7/_index.md new file mode 100644 index 000000000..14ccc7bbd --- /dev/null +++ b/content/zh/learn/level_7/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第七章:Kubernetes 集群与云原生应用管理 +weight: 7 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_1/_index.md b/content/zh/learn/level_7/lesson_1/_index.md new file mode 100644 index 000000000..957336217 --- /dev/null +++ b/content/zh/learn/level_7/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 多节点备份与恢复 +weight: 1 + +_build: + render: false + +profit: 使用 Velero 备份与恢复 KubeSphere 集群 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_1/content.md b/content/zh/learn/level_7/lesson_1/content.md new file mode 100644 index 000000000..5a0f618e8 --- /dev/null +++ b/content/zh/learn/level_7/lesson_1/content.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 多节点备份与恢复 +keywords: Kubesphere, Kubesphere learn +description: 使用 Velero 备份与恢复 KubeSphere 集群 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/KubeSphere%20%E5%A4%87%E4%BB%BD%E4%B8%8E%E6%81%A2%E5%A4%8D-%E5%A4%9A%E8%8A%82%E7%82%B9%E6%A8%A1%E5%BC%8F.pdf + +--- + diff --git a/content/zh/learn/level_7/lesson_1/video.md b/content/zh/learn/level_7/lesson_1/video.md new file mode 100644 index 000000000..7f6d2bc10 --- /dev/null +++ b/content/zh/learn/level_7/lesson_1/video.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 多节点备份与恢复 +keywords: Kubesphere, Kubesphere learn +description: 使用 Velero 
备份与恢复 KubeSphere 集群 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/19%E3%80%81KubeSphere%20%E5%A4%87%E4%BB%BD%E4%B8%8E%E6%81%A2%E5%A4%8D-%E5%A4%9A%E8%8A%82%E7%82%B9%E6%A8%A1%E5%BC%8F.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_10/_index.md b/content/zh/learn/level_7/lesson_10/_index.md new file mode 100644 index 000000000..29f28b511 --- /dev/null +++ b/content/zh/learn/level_7/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 应用全生命周期实践 +weight: 10 + +_build: + render: false + +profit: 了解 KubeSphere 应用的完整生命周期 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_10/content.md b/content/zh/learn/level_7/lesson_10/content.md new file mode 100644 index 000000000..d21083fac --- /dev/null +++ b/content/zh/learn/level_7/lesson_10/content.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 应用全生命周期实践 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeSphere 应用的完整生命周期 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Helm-Helm%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.pdf + +--- + diff --git a/content/zh/learn/level_7/lesson_10/video.md b/content/zh/learn/level_7/lesson_10/video.md new file mode 100644 index 000000000..71f046793 --- /dev/null +++ b/content/zh/learn/level_7/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用全生命周期实践 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeSphere 应用的完整生命周期 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/28%E3%80%81Helm-KubeSphere%20%E5%BA%94%E7%94%A8%E5%85%A8%E7%94%9F%E5%91%BD%E5%91%A8%E6%9C%9F%E5%AE%9E%E8%B7%B5.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_11/_index.md b/content/zh/learn/level_7/lesson_11/_index.md 
new file mode 100644 index 000000000..06ea0af23 --- /dev/null +++ b/content/zh/learn/level_7/lesson_11/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 应用仓库管理 +weight: 11 + +_build: + render: false + +profit: 管理 KubeSphere 的应用仓库 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_11/content.md b/content/zh/learn/level_7/lesson_11/content.md new file mode 100644 index 000000000..38c10c315 --- /dev/null +++ b/content/zh/learn/level_7/lesson_11/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用仓库管理 +keywords: Kubesphere, Kubesphere learn +description: 管理 KubeSphere 的应用仓库 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Helm-Helm%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.pdf +--- + diff --git a/content/zh/learn/level_7/lesson_11/video.md b/content/zh/learn/level_7/lesson_11/video.md new file mode 100644 index 000000000..83a938ca8 --- /dev/null +++ b/content/zh/learn/level_7/lesson_11/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用仓库管理 +keywords: Kubesphere, Kubesphere learn +description: 管理 KubeSphere 的应用仓库 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/29%E3%80%81Helm-KubeSphere%20%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%A1%E7%90%86.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_12/_index.md b/content/zh/learn/level_7/lesson_12/_index.md new file mode 100644 index 000000000..d3ed824ef --- /dev/null +++ b/content/zh/learn/level_7/lesson_12/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 应用管理场景 +weight: 12 + +_build: + render: false + +profit: 部署、升级与删除 KubeSphere 示例应用 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_12/content.md b/content/zh/learn/level_7/lesson_12/content.md new file mode 100644 index 000000000..20f7dcceb --- /dev/null +++ 
b/content/zh/learn/level_7/lesson_12/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用管理场景 +keywords: Kubesphere, Kubesphere learn +description: 部署、升级与删除 KubeSphere 示例应用 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Helm-Helm%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.pdf +--- + diff --git a/content/zh/learn/level_7/lesson_12/video.md b/content/zh/learn/level_7/lesson_12/video.md new file mode 100644 index 000000000..8c7f1292e --- /dev/null +++ b/content/zh/learn/level_7/lesson_12/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用管理场景 +keywords: Kubesphere, Kubesphere learn +description: 部署、升级与删除 KubeSphere 示例应用 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/30%E3%80%81Helm-KubeSphere%20%E5%BA%94%E7%94%A8%E7%AE%A1%E7%90%86%E5%9C%BA%E6%99%AF.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_13/_index.md b/content/zh/learn/level_7/lesson_13/_index.md new file mode 100644 index 000000000..0dd81c19c --- /dev/null +++ b/content/zh/learn/level_7/lesson_13/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes Federation 介绍 +weight: 13 + +_build: + render: false + +profit: 了解 Kubernetes Federation 的架构和原理 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_13/content.md b/content/zh/learn/level_7/lesson_13/content.md new file mode 100644 index 000000000..599a224fc --- /dev/null +++ b/content/zh/learn/level_7/lesson_13/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes Federation 介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 Kubernetes Federation 的架构和原理 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf +--- + diff --git a/content/zh/learn/level_7/lesson_13/video.md 
b/content/zh/learn/level_7/lesson_13/video.md new file mode 100644 index 000000000..39544f75b --- /dev/null +++ b/content/zh/learn/level_7/lesson_13/video.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes Federation 介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 Kubernetes Federation 的架构和原理 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/31%E3%80%81Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8-Kubernetes%20Federation%20%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_14/_index.md b/content/zh/learn/level_7/lesson_14/_index.md new file mode 100644 index 000000000..a6bdab4d9 --- /dev/null +++ b/content/zh/learn/level_7/lesson_14/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 多集群介绍 +weight: 14 + +_build: + render: false + +profit: 了解 KubeSphere 多集群原理 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_14/content.md b/content/zh/learn/level_7/lesson_14/content.md new file mode 100644 index 000000000..dd4f83cd2 --- /dev/null +++ b/content/zh/learn/level_7/lesson_14/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 多集群介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeSphere 多集群原理 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf +--- + diff --git a/content/zh/learn/level_7/lesson_14/video.md b/content/zh/learn/level_7/lesson_14/video.md new file mode 100644 index 000000000..1addc34e7 --- /dev/null +++ b/content/zh/learn/level_7/lesson_14/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 多集群介绍 +keywords: Kubesphere, Kubesphere learn +description: 了解 KubeSphere 多集群原理 + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/32%E3%80%81Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8-KubeSphere%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E4%BB%8B%E7%BB%8D.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_15/_index.md b/content/zh/learn/level_7/lesson_15/_index.md new file mode 100644 index 000000000..334dc08c3 --- /dev/null +++ b/content/zh/learn/level_7/lesson_15/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 跨多集群的应用发布 +weight: 15 + +_build: + render: false + +profit: 使用 KubeSphere 跨多集群发布应用 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_15/content.md b/content/zh/learn/level_7/lesson_15/content.md new file mode 100644 index 000000000..e3cfd597a --- /dev/null +++ b/content/zh/learn/level_7/lesson_15/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 跨多集群的应用发布 +keywords: Kubesphere, Kubesphere learn +description: 使用 KubeSphere 跨多集群发布应用 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf +--- + diff --git a/content/zh/learn/level_7/lesson_15/video.md b/content/zh/learn/level_7/lesson_15/video.md new file mode 100644 index 000000000..ba3421128 --- /dev/null +++ b/content/zh/learn/level_7/lesson_15/video.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 跨多集群的应用发布 +keywords: Kubesphere, Kubesphere learn +description: 使用 KubeSphere 跨多集群发布应用 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/33%E3%80%81Kubernetes%20%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%AE%A1%E7%90%86%E4%B8%8E%E4%BD%BF%E7%94%A8-Kubernetes%20%E8%B7%A8%E5%A4%9A%E9%9B%86%E7%BE%A4%E7%9A%84%E5%BA%94%E7%94%A8%E5%8F%91%E5%B8%83.mp4 +--- diff --git 
a/content/zh/learn/level_7/lesson_2/_index.md b/content/zh/learn/level_7/lesson_2/_index.md new file mode 100644 index 000000000..c26490613 --- /dev/null +++ b/content/zh/learn/level_7/lesson_2/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群 Master 节点启停 +weight: 2 + +_build: + render: false + +profit: 演示停止、启动 Master 节点的过程 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_2/content.md b/content/zh/learn/level_7/lesson_2/content.md new file mode 100644 index 000000000..aa7ca356b --- /dev/null +++ b/content/zh/learn/level_7/lesson_2/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群 Master 节点启停 +keywords: Kubesphere, Kubesphere learn +description: 演示停止、启动 Master 节点的过程 + +pdfUrl: +--- + diff --git a/content/zh/learn/level_7/lesson_2/video.md b/content/zh/learn/level_7/lesson_2/video.md new file mode 100644 index 000000000..c405764e7 --- /dev/null +++ b/content/zh/learn/level_7/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群 Master 节点启停 +keywords: Kubesphere, Kubesphere learn +description: 演示停止、启动 Master 节点的过程 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/20%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E9%87%8D%E5%90%AF%E4%B8%8E%E6%81%A2%E5%A4%8D-Master%20%E8%8A%82%E7%82%B9%E5%90%AF%E5%81%9C.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_3/_index.md b/content/zh/learn/level_7/lesson_3/_index.md new file mode 100644 index 000000000..7960a4fb8 --- /dev/null +++ b/content/zh/learn/level_7/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群 Worker 节点启停 +weight: 3 + +_build: + render: false + +profit: 演示停止、启动 Worker 节点的过程 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_3/content.md b/content/zh/learn/level_7/lesson_3/content.md new file mode 100644 index 000000000..873f6d5c4 --- 
/dev/null +++ b/content/zh/learn/level_7/lesson_3/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群 Worker 节点启停 +keywords: Kubesphere, Kubesphere learn +description: 演示停止、启动 Worker 节点的过程 + +pdfUrl: + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_3/video.md b/content/zh/learn/level_7/lesson_3/video.md new file mode 100644 index 000000000..632680634 --- /dev/null +++ b/content/zh/learn/level_7/lesson_3/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群 Worker 节点启停 +keywords: Kubesphere, Kubesphere learn +description: 演示停止、启动 Worker 节点的过程 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/21%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E9%87%8D%E5%90%AF%E4%B8%8E%E6%81%A2%E5%A4%8D-Worker%20%E8%8A%82%E7%82%B9%E5%90%AF%E5%81%9C.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_4/_index.md b/content/zh/learn/level_7/lesson_4/_index.md new file mode 100644 index 000000000..ad7c323a0 --- /dev/null +++ b/content/zh/learn/level_7/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群重启与恢复 +weight: 4 + +_build: + render: false + +profit: 演示重启集群的功能 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_4/content.md b/content/zh/learn/level_7/lesson_4/content.md new file mode 100644 index 000000000..df58a30ae --- /dev/null +++ b/content/zh/learn/level_7/lesson_4/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群重启与恢复 +keywords: Kubesphere, Kubesphere learn +description: 演示重启集群的功能 + +pdfUrl: + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_4/video.md b/content/zh/learn/level_7/lesson_4/video.md new file mode 100644 index 000000000..89ac6b13c --- /dev/null +++ b/content/zh/learn/level_7/lesson_4/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群重启与恢复 +keywords: Kubesphere, Kubesphere learn +description: 演示重启集群的功能 + 
+video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/22%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E9%87%8D%E5%90%AF%E4%B8%8E%E6%81%A2%E5%A4%8D-%E9%9B%86%E7%BE%A4%E5%90%AF%E5%81%9C.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_5/_index.md b/content/zh/learn/level_7/lesson_5/_index.md new file mode 100644 index 000000000..6f253f481 --- /dev/null +++ b/content/zh/learn/level_7/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 在 KubeSphere 上调试应用 +weight: 5 + +_build: + render: false + +profit: 了解 Kubesphere 的排错方法与工具 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_5/content.md b/content/zh/learn/level_7/lesson_5/content.md new file mode 100644 index 000000000..b39854e73 --- /dev/null +++ b/content/zh/learn/level_7/lesson_5/content.md @@ -0,0 +1,8 @@ +--- +title: 在 KubeSphere 上调试应用 +keywords: Kubesphere, Kubesphere learn +description: 了解 Kubesphere 的排错方法与工具 + +pdfUrl: + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_5/video.md b/content/zh/learn/level_7/lesson_5/video.md new file mode 100644 index 000000000..73c391ca3 --- /dev/null +++ b/content/zh/learn/level_7/lesson_5/video.md @@ -0,0 +1,9 @@ +--- +title: 在 KubeSphere 上调试应用 +keywords: Kubesphere, Kubesphere learn +description: 了解 Kubesphere 的排错方法与工具 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/23%E3%80%81%E5%9C%A8%20KubeSphere%20%E4%B8%8A%E8%B0%83%E8%AF%95%E5%BA%94%E7%94%A8.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_6/_index.md b/content/zh/learn/level_7/lesson_6/_index.md new file mode 100644 index 000000000..eed476c68 --- /dev/null +++ b/content/zh/learn/level_7/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 应用调度 +weight: 6 + 
+_build: + render: false + +profit: 将应用调度到指定主机 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_6/content.md b/content/zh/learn/level_7/lesson_6/content.md new file mode 100644 index 000000000..8862471b9 --- /dev/null +++ b/content/zh/learn/level_7/lesson_6/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用调度 +keywords: Kubesphere, Kubesphere learn +description: 将应用调度到指定主机 + +pdfUrl: +--- + diff --git a/content/zh/learn/level_7/lesson_6/video.md b/content/zh/learn/level_7/lesson_6/video.md new file mode 100644 index 000000000..dff0e9b95 --- /dev/null +++ b/content/zh/learn/level_7/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用调度 +keywords: Kubesphere, Kubesphere learn +description: 将应用调度到指定主机 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/24%E3%80%81KubeSphere%20%E5%BA%94%E7%94%A8%E8%B0%83%E5%BA%A6.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_7/_index.md b/content/zh/learn/level_7/lesson_7/_index.md new file mode 100644 index 000000000..81df8566f --- /dev/null +++ b/content/zh/learn/level_7/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 创建并部署 WordPress +weight: 7 + +_build: + render: false + +profit: 使用 KubeSphere 部署 Wordpress +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_7/content.md b/content/zh/learn/level_7/lesson_7/content.md new file mode 100644 index 000000000..63c73bd4b --- /dev/null +++ b/content/zh/learn/level_7/lesson_7/content.md @@ -0,0 +1,9 @@ +--- +title: 创建并部署 WordPress +keywords: Kubesphere, Kubesphere learn +description: 使用 KubeSphere 部署 Wordpress + +pdfUrl: + +--- + diff --git a/content/zh/learn/level_7/lesson_7/video.md b/content/zh/learn/level_7/lesson_7/video.md new file mode 100644 index 000000000..762d076fe --- /dev/null +++ b/content/zh/learn/level_7/lesson_7/video.md @@ -0,0 +1,8 @@ +--- 
+title: 创建并部署 WordPress +keywords: Kubesphere, Kubesphere learn +description: 使用 KubeSphere 部署 WordPress + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/25%E3%80%81%E5%88%9B%E5%BB%BA%E5%B9%B6%E9%83%A8%E7%BD%B2%20WordPress.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_8/_index.md b/content/zh/learn/level_7/lesson_8/_index.md new file mode 100644 index 000000000..f4f828902 --- /dev/null +++ b/content/zh/learn/level_7/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Helm 及应用仓库简介 +weight: 8 + +_build: + render: false + +profit: 了解 Helm 及其应用仓库的概念 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_8/content.md b/content/zh/learn/level_7/lesson_8/content.md new file mode 100644 index 000000000..63254ee54 --- /dev/null +++ b/content/zh/learn/level_7/lesson_8/content.md @@ -0,0 +1,9 @@ +--- +title: Helm 及应用仓库简介 +keywords: Kubesphere, Kubesphere learn +description: 了解 Helm 及其应用仓库的概念 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Helm-Helm%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.pdf + +--- + diff --git a/content/zh/learn/level_7/lesson_8/video.md b/content/zh/learn/level_7/lesson_8/video.md new file mode 100644 index 000000000..87f89554a --- /dev/null +++ b/content/zh/learn/level_7/lesson_8/video.md @@ -0,0 +1,8 @@ +--- +title: Helm 及应用仓库简介 +keywords: Kubesphere, Kubesphere learn +description: 了解 Helm 及其应用仓库的概念 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/26%E3%80%81Helm-Helm%20%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.mp4 +--- diff --git a/content/zh/learn/level_7/lesson_9/_index.md b/content/zh/learn/level_7/lesson_9/_index.md new file mode 100644 index 000000000..6e4437fc5 --- /dev/null +++ 
b/content/zh/learn/level_7/lesson_9/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 如何开发一个 Helm 应用 +weight: 9 + +_build: + render: false + +profit: 了解 Helm 应用开发规范 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_7/lesson_9/content.md b/content/zh/learn/level_7/lesson_9/content.md new file mode 100644 index 000000000..a8ba0bb4e --- /dev/null +++ b/content/zh/learn/level_7/lesson_9/content.md @@ -0,0 +1,9 @@ +--- +title: 如何开发一个 Helm 应用 +keywords: Kubesphere, Kubesphere learn +description: 了解 Helm 应用开发规范 + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Helm-Helm%E5%8F%8A%E5%BA%94%E7%94%A8%E4%BB%93%E5%BA%93%E7%AE%80%E4%BB%8B.pdf + +--- + diff --git a/content/zh/learn/level_7/lesson_9/video.md b/content/zh/learn/level_7/lesson_9/video.md new file mode 100644 index 000000000..833564b0b --- /dev/null +++ b/content/zh/learn/level_7/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: 如何开发一个 Helm 应用 +keywords: Kubesphere, Kubesphere learn +description: 了解 Helm 应用开发规范 + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/27%E3%80%81Helm-%E5%A6%82%E4%BD%95%E5%BC%80%E5%8F%91%E4%B8%80%E4%B8%AA%20Helm%20%E5%BA%94%E7%94%A8.mp4 +--- diff --git a/content/zh/learn/level_8/_index.md b/content/zh/learn/level_8/_index.md new file mode 100644 index 000000000..71eab1220 --- /dev/null +++ b/content/zh/learn/level_8/_index.md @@ -0,0 +1,7 @@ +--- +linkTitle: 第八章:微服务部署与流量治理 +weight: 8 + +_build: + render: false +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_1/_index.md b/content/zh/learn/level_8/lesson_1/_index.md new file mode 100644 index 000000000..27421e620 --- /dev/null +++ b/content/zh/learn/level_8/lesson_1/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: SpringCloud 与 kubernetes 基础概念 +weight: 1 + +_build: + render: false + +profit: 了解 kubernetes 下微服务的基础概念 +time: 
2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_1/content.md b/content/zh/learn/level_8/lesson_1/content.md new file mode 100644 index 000000000..e523a4b02 --- /dev/null +++ b/content/zh/learn/level_8/lesson_1/content.md @@ -0,0 +1,9 @@ +--- +title: SpringCloud 与 kubernetes 基础概念 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://note.wansir.com/bitF5hQCSheJjMOHSjSXsQ?view#Spring-Cloud--K8s-最佳实践 + +--- + diff --git a/content/zh/learn/level_8/lesson_1/video.md b/content/zh/learn/level_8/lesson_1/video.md new file mode 100644 index 000000000..d99ae0df5 --- /dev/null +++ b/content/zh/learn/level_8/lesson_1/video.md @@ -0,0 +1,9 @@ +--- +title: SpringCloud 与 kubernetes 基础概念 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/34%E3%80%81%E5%BE%AE%E6%9C%8D%E5%8A%A1-spring-cloud-kubernetes%20%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_10/_index.md b/content/zh/learn/level_8/lesson_10/_index.md new file mode 100644 index 000000000..477586b80 --- /dev/null +++ b/content/zh/learn/level_8/lesson_10/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 虚拟服务和目标规则使用 +weight: 10 + +_build: + render: false + +profit: 通过实践来理解 Istio 虚拟服务和目标规则使用 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_10/content.md b/content/zh/learn/level_8/lesson_10/content.md new file mode 100644 index 000000000..d4635ad9e --- /dev/null +++ b/content/zh/learn/level_8/lesson_10/content.md @@ -0,0 +1,9 @@ +--- +title: 虚拟服务和目标规则使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_10/video.md b/content/zh/learn/level_8/lesson_10/video.md new file mode 100644 index 000000000..a755e3f80 --- /dev/null +++ b/content/zh/learn/level_8/lesson_10/video.md @@ -0,0 +1,8 @@ +--- +title: 虚拟服务和目标规则使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/43%E3%80%81Service%20Mesh-%E8%99%9A%E6%8B%9F%E6%9C%8D%E5%8A%A1%E5%92%8C%E7%9B%AE%E6%A0%87%E8%A7%84%E5%88%99%E4%BD%BF%E7%94%A8.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_11/_index.md b/content/zh/learn/level_8/lesson_11/_index.md new file mode 100644 index 000000000..2e87ea3c8 --- /dev/null +++ b/content/zh/learn/level_8/lesson_11/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 启用服务治理 +weight: 11 + +_build: + render: false + +profit: 在 KubeSphere 上使用基于 Istio 的服务治理 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_11/content.md b/content/zh/learn/level_8/lesson_11/content.md new file mode 100644 index 000000000..9f79d6f93 --- /dev/null +++ b/content/zh/learn/level_8/lesson_11/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 启用服务治理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf +--- + diff --git a/content/zh/learn/level_8/lesson_11/video.md b/content/zh/learn/level_8/lesson_11/video.md new file mode 100644 index 000000000..a1ca366a9 --- /dev/null +++ b/content/zh/learn/level_8/lesson_11/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 启用服务治理 +keywords: Kubesphere, 
Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/44%E3%80%81Service%20Mesh-KubeSphere%20%E5%90%AF%E7%94%A8%E6%9C%8D%E5%8A%A1%E6%B2%BB%E7%90%86.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_12/_index.md b/content/zh/learn/level_8/lesson_12/_index.md new file mode 100644 index 000000000..d9899e5ab --- /dev/null +++ b/content/zh/learn/level_8/lesson_12/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 灰度发布 +weight: 12 + +_build: + render: false + +profit: 在 KubeSphere 上实践灰度发布 Bookinfo 项目 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_12/content.md b/content/zh/learn/level_8/lesson_12/content.md new file mode 100644 index 000000000..d882fa6b0 --- /dev/null +++ b/content/zh/learn/level_8/lesson_12/content.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 灰度发布 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_12/video.md b/content/zh/learn/level_8/lesson_12/video.md new file mode 100644 index 000000000..e34877501 --- /dev/null +++ b/content/zh/learn/level_8/lesson_12/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 灰度发布 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/45%E3%80%81Service%20Mesh-KubeSphere%20%E7%81%B0%E5%BA%A6%E5%8F%91%E5%B8%83.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_2/_index.md b/content/zh/learn/level_8/lesson_2/_index.md new file mode 100644 index 000000000..d3a75dc8a --- /dev/null +++ 
b/content/zh/learn/level_8/lesson_2/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 部署 SpringCloud 全家桶 +weight: 2 + +_build: + render: false + +profit: 在 KubeSphere 上部署微服务 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_2/content.md b/content/zh/learn/level_8/lesson_2/content.md new file mode 100644 index 000000000..005e9aafe --- /dev/null +++ b/content/zh/learn/level_8/lesson_2/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 部署 SpringCloud 全家桶 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://note.wansir.com/bitF5hQCSheJjMOHSjSXsQ?view#Spring-Cloud--K8s-%E6%9C%80%E4%BD%B3%E5%AE%9E%E8%B7%B5 +--- + diff --git a/content/zh/learn/level_8/lesson_2/video.md b/content/zh/learn/level_8/lesson_2/video.md new file mode 100644 index 000000000..fa3beea11 --- /dev/null +++ b/content/zh/learn/level_8/lesson_2/video.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 部署 SpringCloud 全家桶 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/35%E3%80%81%E5%BE%AE%E6%9C%8D%E5%8A%A1-%E5%9C%A8%20KubeSphere%20%E4%B8%8A%E9%83%A8%E7%BD%B2%20Spring%20Cloud%20%E5%85%A8%E5%AE%B6%E6%A1%B6.mp4 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_3/_index.md b/content/zh/learn/level_8/lesson_3/_index.md new file mode 100644 index 000000000..78984400f --- /dev/null +++ b/content/zh/learn/level_8/lesson_3/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 微服务概念回顾 +weight: 3 + +_build: + render: false + +profit: 了解微服务概念 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_3/content.md b/content/zh/learn/level_8/lesson_3/content.md new file mode 100644 index 000000000..593788689 --- /dev/null +++ b/content/zh/learn/level_8/lesson_3/content.md @@ -0,0 +1,8 @@ +--- +title: 
微服务概念回顾 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_3/video.md b/content/zh/learn/level_8/lesson_3/video.md new file mode 100644 index 000000000..571468199 --- /dev/null +++ b/content/zh/learn/level_8/lesson_3/video.md @@ -0,0 +1,9 @@ +--- +title: 微服务概念回顾 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/36%E3%80%81Service%20Mesh-%E5%BE%AE%E6%9C%8D%E5%8A%A1%E6%A6%82%E5%BF%B5%E5%9B%9E%E9%A1%BE.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_4/_index.md b/content/zh/learn/level_8/lesson_4/_index.md new file mode 100644 index 000000000..59a12a821 --- /dev/null +++ b/content/zh/learn/level_8/lesson_4/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: 传统微服务架构的挑战 +weight: 4 + +_build: + render: false + +profit: 了解传统微服务架构存在的挑战 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_4/content.md b/content/zh/learn/level_8/lesson_4/content.md new file mode 100644 index 000000000..6e288515c --- /dev/null +++ b/content/zh/learn/level_8/lesson_4/content.md @@ -0,0 +1,8 @@ +--- +title: 传统微服务架构的挑战 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_4/video.md b/content/zh/learn/level_8/lesson_4/video.md new file mode 100644 index 000000000..65a4dce35 --- /dev/null +++ 
b/content/zh/learn/level_8/lesson_4/video.md @@ -0,0 +1,9 @@ +--- +title: 传统微服务架构的挑战 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/37%E3%80%81Service%20Mesh-%E4%BC%A0%E7%BB%9F%E5%BE%AE%E6%9C%8D%E5%8A%A1%E6%9E%B6%E6%9E%84%E7%9A%84%E6%8C%91%E6%88%98.mp4 + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_5/_index.md b/content/zh/learn/level_8/lesson_5/_index.md new file mode 100644 index 000000000..20000058b --- /dev/null +++ b/content/zh/learn/level_8/lesson_5/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: ServiceMesh 概念与架构 +weight: 5 + +_build: + render: false + +profit: 通过服务架构的演进来理解服务网格的概念 +time: 2021-12-17 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_5/content.md b/content/zh/learn/level_8/lesson_5/content.md new file mode 100644 index 000000000..f89bea381 --- /dev/null +++ b/content/zh/learn/level_8/lesson_5/content.md @@ -0,0 +1,8 @@ +--- +title: ServiceMesh 概念与架构 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_5/video.md b/content/zh/learn/level_8/lesson_5/video.md new file mode 100644 index 000000000..c42a374a5 --- /dev/null +++ b/content/zh/learn/level_8/lesson_5/video.md @@ -0,0 +1,9 @@ +--- +title: ServiceMesh 概念与架构 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/38%E3%80%81Service%20Mesh-Service%20Mesh%20%E6%A6%82%E5%BF%B5%E4%B8%8E%E6%9E%B6%E6%9E%84.mp4 + +--- \ No newline at end of file 
diff --git a/content/zh/learn/level_8/lesson_6/_index.md b/content/zh/learn/level_8/lesson_6/_index.md new file mode 100644 index 000000000..638a7219c --- /dev/null +++ b/content/zh/learn/level_8/lesson_6/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Istio 简介 +weight: 6 + +_build: + render: false + +profit: 了解 Istio 服务网格框架 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_6/content.md b/content/zh/learn/level_8/lesson_6/content.md new file mode 100644 index 000000000..fc898380b --- /dev/null +++ b/content/zh/learn/level_8/lesson_6/content.md @@ -0,0 +1,9 @@ +--- +title: Istio 简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_6/video.md b/content/zh/learn/level_8/lesson_6/video.md new file mode 100644 index 000000000..c2deacec1 --- /dev/null +++ b/content/zh/learn/level_8/lesson_6/video.md @@ -0,0 +1,8 @@ +--- +title: Istio 简介 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/39%E3%80%81Service%20Mesh-Isito%20%E7%AE%80%E4%BB%8B.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_7/_index.md b/content/zh/learn/level_8/lesson_7/_index.md new file mode 100644 index 000000000..68373c66e --- /dev/null +++ b/content/zh/learn/level_8/lesson_7/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Istio 安装 +weight: 7 + +_build: + render: false + +profit: 通过实践演示来理解 Istio +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_7/content.md b/content/zh/learn/level_8/lesson_7/content.md new file mode 100644 index 000000000..2809df809 --- /dev/null +++ 
b/content/zh/learn/level_8/lesson_7/content.md @@ -0,0 +1,9 @@ +--- +title: Istio 安装 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_7/video.md b/content/zh/learn/level_8/lesson_7/video.md new file mode 100644 index 000000000..5555c411b --- /dev/null +++ b/content/zh/learn/level_8/lesson_7/video.md @@ -0,0 +1,8 @@ +--- +title: Istio 安装 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/40%E3%80%81Service%20Mesh-Istio%20%E5%AE%89%E8%A3%85.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_8/_index.md b/content/zh/learn/level_8/lesson_8/_index.md new file mode 100644 index 000000000..2d25d02c7 --- /dev/null +++ b/content/zh/learn/level_8/lesson_8/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Bookinfo 演示 +weight: 8 + +_build: + render: false + +profit: 通过实践 Bookinfo 项目来演示 Istio 的各种特性 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_8/content.md b/content/zh/learn/level_8/lesson_8/content.md new file mode 100644 index 000000000..d70eacc81 --- /dev/null +++ b/content/zh/learn/level_8/lesson_8/content.md @@ -0,0 +1,9 @@ +--- +title: Bookinfo 演示 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_8/video.md b/content/zh/learn/level_8/lesson_8/video.md new file mode 100644 index 000000000..4cbac0516 --- /dev/null +++ b/content/zh/learn/level_8/lesson_8/video.md @@ -0,0 
+1,8 @@ +--- +title: Bookinfo 演示 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/41%E3%80%81Service%20Mesh-Bookinfo%20%E6%BC%94%E7%A4%BA.mp4 +--- diff --git a/content/zh/learn/level_8/lesson_9/_index.md b/content/zh/learn/level_8/lesson_9/_index.md new file mode 100644 index 000000000..af235cb73 --- /dev/null +++ b/content/zh/learn/level_8/lesson_9/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Istio 核心概念解读 +weight: 9 + +_build: + render: false + +profit: 了解 Istio 核心概念 +time: 2021-12-18 20:00-20:40 +--- \ No newline at end of file diff --git a/content/zh/learn/level_8/lesson_9/content.md b/content/zh/learn/level_8/lesson_9/content.md new file mode 100644 index 000000000..5cf91532c --- /dev/null +++ b/content/zh/learn/level_8/lesson_9/content.md @@ -0,0 +1,9 @@ +--- +title: Istio 核心概念解读 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Service%20Mesh%20%E5%85%A5%E9%97%A8%E4%B8%8E%E5%AE%9E%E6%88%98.pdf + +--- + diff --git a/content/zh/learn/level_8/lesson_9/video.md b/content/zh/learn/level_8/lesson_9/video.md new file mode 100644 index 000000000..f091033c7 --- /dev/null +++ b/content/zh/learn/level_8/lesson_9/video.md @@ -0,0 +1,8 @@ +--- +title: Istio 核心概念解读 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/42%E3%80%81Service%20Mesh-Istio%20%E6%A0%B8%E5%BF%83%E6%A6%82%E5%BF%B5%E8%A7%A3%E8%AF%BB.mp4 +--- diff --git a/content/zh/learn/level_9/_index.md b/content/zh/learn/level_9/_index.md new file mode 100644 index 000000000..4b66f6298 --- /dev/null +++ b/content/zh/learn/level_9/_index.md @@ -0,0 +1,7 @@ +--- 
+linkTitle: 第九章:Kubernetes 云原生可观测性 +weight: 9 + +_build: + render: false +--- diff --git a/content/zh/learn/level_9/lesson_46/_index.md b/content/zh/learn/level_9/lesson_46/_index.md new file mode 100644 index 000000000..874b57cd8 --- /dev/null +++ b/content/zh/learn/level_9/lesson_46/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群日志生产部署最佳实践 +weight: 46 + +_build: + render: false + +profit: 启用日志组件 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_46/content.md b/content/zh/learn/level_9/lesson_46/content.md new file mode 100644 index 000000000..4db7cea60 --- /dev/null +++ b/content/zh/learn/level_9/lesson_46/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群日志生产部署最佳实践 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_46/video.md b/content/zh/learn/level_9/lesson_46/video.md new file mode 100644 index 000000000..b02d977a9 --- /dev/null +++ b/content/zh/learn/level_9/lesson_46/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群日志生产部署最佳实践 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/46%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97-%E7%94%9F%E4%BA%A7%E9%83%A8%E7%BD%B2%E6%9C%80%E4%BD%B3%E5%AE%9E%E8%B7%B5.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_47/_index.md b/content/zh/learn/level_9/lesson_47/_index.md new file mode 100644 index 000000000..9770f4c41 --- /dev/null +++ b/content/zh/learn/level_9/lesson_47/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群日志检索与落盘日志收集 +weight: 47 + +_build: + render: false + +profit: 容器日志检索与落盘日志收集 +time: 2021-12-17 
20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_47/content.md b/content/zh/learn/level_9/lesson_47/content.md new file mode 100644 index 000000000..8037a00cd --- /dev/null +++ b/content/zh/learn/level_9/lesson_47/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群日志检索与落盘日志收集 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_47/video.md b/content/zh/learn/level_9/lesson_47/video.md new file mode 100644 index 000000000..ec052337c --- /dev/null +++ b/content/zh/learn/level_9/lesson_47/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群日志检索与落盘日志收集 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/47%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97-%E6%97%A5%E5%BF%97%E6%A3%80%E7%B4%A2%E4%B8%8E%E8%90%BD%E7%9B%98%E6%97%A5%E5%BF%97%E6%94%B6%E9%9B%86.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_48/_index.md b/content/zh/learn/level_9/lesson_48/_index.md new file mode 100644 index 000000000..813ae01da --- /dev/null +++ b/content/zh/learn/level_9/lesson_48/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群常见问题及解决办法 +weight: 48 + +_build: + render: false + +profit: 常见问题及解决办法 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_48/content.md b/content/zh/learn/level_9/lesson_48/content.md new file mode 100644 index 000000000..19796d0fc --- /dev/null +++ b/content/zh/learn/level_9/lesson_48/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群常见问题及解决办法 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: 
https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_48/video.md b/content/zh/learn/level_9/lesson_48/video.md new file mode 100644 index 000000000..73422f005 --- /dev/null +++ b/content/zh/learn/level_9/lesson_48/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群常见问题及解决办法 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/48%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97-%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98%E5%8F%8A%E8%A7%A3%E5%86%B3%E5%8A%9E%E6%B3%95.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_49/_index.md b/content/zh/learn/level_9/lesson_49/_index.md new file mode 100644 index 000000000..edba90a0d --- /dev/null +++ b/content/zh/learn/level_9/lesson_49/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Prometheus 安装使用 +weight: 49 + +_build: + render: false + +profit: Prometheus 起源介绍及安装使用 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_49/content.md b/content/zh/learn/level_9/lesson_49/content.md new file mode 100644 index 000000000..860c3c06a --- /dev/null +++ b/content/zh/learn/level_9/lesson_49/content.md @@ -0,0 +1,8 @@ +--- +title: Prometheus 安装使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Prometheus%20%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5%E4%B8%8E%E4%B8%8A%E6%89%8B%E5%AE%9E%E8%B7%B5.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_49/video.md b/content/zh/learn/level_9/lesson_49/video.md new file mode 100644 index 000000000..942c1faa3 --- /dev/null +++ b/content/zh/learn/level_9/lesson_49/video.md @@ -0,0 +1,9 
@@ +--- +title: Prometheus 安装使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/49%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-Prometheus%20%E5%AE%89%E8%A3%85%E4%BD%BF%E7%94%A8.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_50/_index.md b/content/zh/learn/level_9/lesson_50/_index.md new file mode 100644 index 000000000..e4f3ff878 --- /dev/null +++ b/content/zh/learn/level_9/lesson_50/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: PromQL 介绍 +weight: 50 + +_build: + render: false + +profit: PromQL 表达式类型、语法及指标类型 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_50/content.md b/content/zh/learn/level_9/lesson_50/content.md new file mode 100644 index 000000000..5202c1aa5 --- /dev/null +++ b/content/zh/learn/level_9/lesson_50/content.md @@ -0,0 +1,8 @@ +--- +title: PromQL 介绍 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Prometheus%20%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5%E4%B8%8E%E4%B8%8A%E6%89%8B%E5%AE%9E%E8%B7%B5.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_50/video.md b/content/zh/learn/level_9/lesson_50/video.md new file mode 100644 index 000000000..6fe4c5b06 --- /dev/null +++ b/content/zh/learn/level_9/lesson_50/video.md @@ -0,0 +1,9 @@ +--- +title: PromQL 介绍 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/50%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-PromQL%20%E4%BB%8B%E7%BB%8D.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_51/_index.md b/content/zh/learn/level_9/lesson_51/_index.md new file mode 100644 index 000000000..51b2c7a6f --- 
/dev/null +++ b/content/zh/learn/level_9/lesson_51/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Prometheus 告警处理 +weight: 51 + +_build: + render: false + +profit: Prometheus 告警配置及查看 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_51/content.md b/content/zh/learn/level_9/lesson_51/content.md new file mode 100644 index 000000000..01b27a799 --- /dev/null +++ b/content/zh/learn/level_9/lesson_51/content.md @@ -0,0 +1,8 @@ +--- +title: Prometheus 告警处理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Prometheus%20%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5%E4%B8%8E%E4%B8%8A%E6%89%8B%E5%AE%9E%E8%B7%B5.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_51/video.md b/content/zh/learn/level_9/lesson_51/video.md new file mode 100644 index 000000000..7d300b11f --- /dev/null +++ b/content/zh/learn/level_9/lesson_51/video.md @@ -0,0 +1,9 @@ +--- +title: Prometheus 告警处理 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/51%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-Prometheus%20%E5%91%8A%E8%AD%A6%E5%A4%84%E7%90%86.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_52/_index.md b/content/zh/learn/level_9/lesson_52/_index.md new file mode 100644 index 000000000..0c53cc3eb --- /dev/null +++ b/content/zh/learn/level_9/lesson_52/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Prometheus Operator 安装使用与高级配置 +weight: 52 + +_build: + render: false + +profit: Prometheus Operator 架构介绍、安装与配置解析 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_52/content.md b/content/zh/learn/level_9/lesson_52/content.md new file mode 100644 index 000000000..5ed70687d --- /dev/null +++ b/content/zh/learn/level_9/lesson_52/content.md @@ -0,0 +1,8 @@ +--- 
+title: Prometheus Operator 安装使用与高级配置 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/Prometheus%20%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5%E4%B8%8E%E4%B8%8A%E6%89%8B%E5%AE%9E%E8%B7%B5.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_52/video.md b/content/zh/learn/level_9/lesson_52/video.md new file mode 100644 index 000000000..cee9acbac --- /dev/null +++ b/content/zh/learn/level_9/lesson_52/video.md @@ -0,0 +1,9 @@ +--- +title: Prometheus Operator 安装使用与高级配置 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/52%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-Prometheus%20Operator%20%E5%AE%89%E8%A3%85%E4%BD%BF%E7%94%A8%E4%B8%8E%E9%AB%98%E7%BA%A7%E9%85%8D%E7%BD%AE.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_53/_index.md b/content/zh/learn/level_9/lesson_53/_index.md new file mode 100644 index 000000000..75ce0e155 --- /dev/null +++ b/content/zh/learn/level_9/lesson_53/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 监控功能与使用 +weight: 53 + +_build: + render: false + +profit: KubeSphere 监控系统介绍及使用 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_53/content.md b/content/zh/learn/level_9/lesson_53/content.md new file mode 100644 index 000000000..08aeb7fe6 --- /dev/null +++ b/content/zh/learn/level_9/lesson_53/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 监控功能与使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E7%9B%91%E6%8E%A7%E5%8A%9F%E8%83%BD%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_53/video.md 
b/content/zh/learn/level_9/lesson_53/video.md new file mode 100644 index 000000000..6955b7144 --- /dev/null +++ b/content/zh/learn/level_9/lesson_53/video.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 监控功能与使用 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/53%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E7%9B%91%E6%8E%A7%E5%8A%9F%E8%83%BD%E4%B8%8E%E4%BD%BF%E7%94%A8.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_54/_index.md b/content/zh/learn/level_9/lesson_54/_index.md new file mode 100644 index 000000000..56a56b25d --- /dev/null +++ b/content/zh/learn/level_9/lesson_54/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 应用状态监控 +weight: 54 + +_build: + render: false + +profit: KubeSphere 应用资源监控 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_54/content.md b/content/zh/learn/level_9/lesson_54/content.md new file mode 100644 index 000000000..407ca9def --- /dev/null +++ b/content/zh/learn/level_9/lesson_54/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 应用状态监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E7%9B%91%E6%8E%A7%E5%8A%9F%E8%83%BD%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_54/video.md b/content/zh/learn/level_9/lesson_54/video.md new file mode 100644 index 000000000..efe2d75a1 --- /dev/null +++ b/content/zh/learn/level_9/lesson_54/video.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 应用状态监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: 
https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/54%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E5%BA%94%E7%94%A8%E7%8A%B6%E6%80%81%E7%9B%91%E6%8E%A7.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_55/_index.md b/content/zh/learn/level_9/lesson_55/_index.md new file mode 100644 index 000000000..63b39896e --- /dev/null +++ b/content/zh/learn/level_9/lesson_55/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 集群状态监控 +weight: 55 + +_build: + render: false + +profit: Kubernetes 集群状态监控介绍 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_55/content.md b/content/zh/learn/level_9/lesson_55/content.md new file mode 100644 index 000000000..641cc7af1 --- /dev/null +++ b/content/zh/learn/level_9/lesson_55/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 集群状态监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E7%9B%91%E6%8E%A7%E5%8A%9F%E8%83%BD%E4%B8%8E%E4%BD%BF%E7%94%A8.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_55/video.md b/content/zh/learn/level_9/lesson_55/video.md new file mode 100644 index 000000000..dd939c2e3 --- /dev/null +++ b/content/zh/learn/level_9/lesson_55/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 集群状态监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/46%E3%80%81Kubernetes%20%E9%9B%86%E7%BE%A4%E4%B8%8E%E5%BA%94%E7%94%A8%E6%97%A5%E5%BF%97-%E7%94%9F%E4%BA%A7%E9%83%A8%E7%BD%B2%E6%9C%80%E4%BD%B3%E5%AE%9E%E8%B7%B5.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_56/_index.md b/content/zh/learn/level_9/lesson_56/_index.md new file mode 100644 index
000000000..163365a20 --- /dev/null +++ b/content/zh/learn/level_9/lesson_56/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 基于租户的告警与通知 +weight: 56 + +_build: + render: false + +profit: KubeSphere 基于租户的告警功能介绍及告警策略配置 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_56/content.md b/content/zh/learn/level_9/lesson_56/content.md new file mode 100644 index 000000000..29622e6ae --- /dev/null +++ b/content/zh/learn/level_9/lesson_56/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 基于租户的告警与通知 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E5%9F%BA%E4%BA%8E%E7%A7%9F%E6%88%B7%E7%9A%84%E5%91%8A%E8%AD%A6%E4%B8%8E%E9%80%9A%E7%9F%A5.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_56/video.md b/content/zh/learn/level_9/lesson_56/video.md new file mode 100644 index 000000000..dbadf68f9 --- /dev/null +++ b/content/zh/learn/level_9/lesson_56/video.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 基于租户的告警与通知 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/56%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E5%9F%BA%E4%BA%8E%E7%A7%9F%E6%88%B7%E7%9A%84%E5%91%8A%E8%AD%A6%E4%B8%8E%E9%80%9A%E7%9F%A5.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_57/_index.md b/content/zh/learn/level_9/lesson_57/_index.md new file mode 100644 index 000000000..2f0acfc2a --- /dev/null +++ b/content/zh/learn/level_9/lesson_57/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: KubeSphere 自定义监控 +weight: 57 + +_build: + render: false + +profit: KubeSphere 自定义监控面板配置及实战 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_57/content.md 
b/content/zh/learn/level_9/lesson_57/content.md new file mode 100644 index 000000000..74e29bf9f --- /dev/null +++ b/content/zh/learn/level_9/lesson_57/content.md @@ -0,0 +1,8 @@ +--- +title: KubeSphere 自定义监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: https://pek3b.qingstor.com/kubesphere-community/pdf/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E8%87%AA%E5%AE%9A%E4%B9%89%E7%9B%91%E6%8E%A7.pdf + +--- diff --git a/content/zh/learn/level_9/lesson_57/video.md b/content/zh/learn/level_9/lesson_57/video.md new file mode 100644 index 000000000..7d4ae6f98 --- /dev/null +++ b/content/zh/learn/level_9/lesson_57/video.md @@ -0,0 +1,9 @@ +--- +title: KubeSphere 自定义监控 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/57%E3%80%81%E7%9B%91%E6%8E%A7%E4%B8%8E%E5%91%8A%E8%AD%A6-KubeSphere%20%E8%87%AA%E5%AE%9A%E4%B9%89%E7%9B%91%E6%8E%A7.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_58/_index.md b/content/zh/learn/level_9/lesson_58/_index.md new file mode 100644 index 000000000..4e023c888 --- /dev/null +++ b/content/zh/learn/level_9/lesson_58/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 审计与事件 +weight: 58 + +_build: + render: false + +profit: 通过 Kubernetes 审计和事件查询定位故障 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_58/content.md b/content/zh/learn/level_9/lesson_58/content.md new file mode 100644 index 000000000..1aa4dcccc --- /dev/null +++ b/content/zh/learn/level_9/lesson_58/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 审计与事件 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: + +--- diff --git a/content/zh/learn/level_9/lesson_58/video.md b/content/zh/learn/level_9/lesson_58/video.md new file mode 100644 index 000000000..bacfb0650 --- /dev/null 
+++ b/content/zh/learn/level_9/lesson_58/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 审计与事件 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/58%E3%80%81%E5%8F%AF%E8%A7%82%E6%B5%8B%E6%80%A7-Kubernetes%20%E5%AE%A1%E8%AE%A1%E4%B8%8E%E4%BA%8B%E4%BB%B6.mp4 + +--- diff --git a/content/zh/learn/level_9/lesson_59/_index.md b/content/zh/learn/level_9/lesson_59/_index.md new file mode 100644 index 000000000..4459d0d19 --- /dev/null +++ b/content/zh/learn/level_9/lesson_59/_index.md @@ -0,0 +1,10 @@ +--- +linkTitle: Kubernetes 计量计费 +weight: 59 + +_build: + render: false + +profit: Kubernetes 计量计费配置及资源消费统计 +time: 2021-12-17 20:00-20:40 +--- diff --git a/content/zh/learn/level_9/lesson_59/content.md b/content/zh/learn/level_9/lesson_59/content.md new file mode 100644 index 000000000..987385a8c --- /dev/null +++ b/content/zh/learn/level_9/lesson_59/content.md @@ -0,0 +1,8 @@ +--- +title: Kubernetes 计量计费 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +pdfUrl: + +--- diff --git a/content/zh/learn/level_9/lesson_59/video.md b/content/zh/learn/level_9/lesson_59/video.md new file mode 100644 index 000000000..6b54d0229 --- /dev/null +++ b/content/zh/learn/level_9/lesson_59/video.md @@ -0,0 +1,9 @@ +--- +title: Kubernetes 计量计费 +keywords: Kubesphere, Kubesphere learn +description: Kubesphere + +video: + videoUrl: https://pek3b.qingstor.com/kubesphere-community/videos/%E4%BA%91%E5%8E%9F%E7%94%9F%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E6%9C%9F/59%E3%80%81%E5%8F%AF%E8%A7%82%E6%B5%8B%E6%80%A7-Kubernetes%20%E8%AE%A1%E9%87%8F%E8%AE%A1%E8%B4%B9.mp4 + +--- diff --git a/content/zh/live/3.1-live.md b/content/zh/live/3.1-live.md index a7482ca79..9267638db 100644 --- a/content/zh/live/3.1-live.md +++ b/content/zh/live/3.1-live.md @@ -20,4 +20,8 @@ KubeSphere 3.1 全新发布!主打 “延伸至边缘侧的容器混合云” 此次交流会特面向社区开放交流,将为大家演示 
KubeSphere 3.1 新特性与后续规划。 -![3.1live](https://pek3b.qingstor.com/kubesphere-community/images/3.1live-poster.jpg) \ No newline at end of file +![3.1live](https://pek3b.qingstor.com/kubesphere-community/images/3.1live-poster.jpg) + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `3.1.0` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/_index.md b/content/zh/live/_index.md index 1239ae346..918497e03 100644 --- a/content/zh/live/_index.md +++ b/content/zh/live/_index.md @@ -1,7 +1,7 @@ --- title: live - KubeSphere | Enterprise container platform, built on Kubernetes description: KubeSphere is an open source container platform based on Kubernetes for enterprise app development and deployment, suppors installing anywhere from on-premise datacenter to any cloud to edge. -keywords: KubeSphere,DevOps,Istio,Service Mesh,Jenkins +keywords: KubeSphere, DevOps, Istio, Service Mesh, Jenkins css: "scss/live.scss" section1: @@ -9,23 +9,128 @@ section1: image: /images/live/background.jpg section2: - image: /images/live/cloudnative-live-cover.png - url: ./uisee0923-live/ + image: /images/live/cloudnative-live-banner.png + url: ./openfunction0113-live/ notice: - title: GitOps 应用简介 - tag: 预告 - time: 2021 年 10 月 14 日晚 8 点 + title: MQTT 及车联网场景应用 + tag: 结束 + time: 2021 年 12 月 30 日晚 8 点 base: 线上 - url: ./yunda1014-live/ + url: ./mqtt1230-live/ over: - title: Kubernetes 控制器原理简介 - url: ./uisee0916-live/ + title: 携程分布式存储实践 + url: ./ceph1216-live/ tag: 结束 section3: videos: + - title: OpenFunction v0.5.0 新特性讲解与 v0.6.0 展望 + link: ./openfunction0113-live/ + snapshot: http://pek3b.qingstor.com/kubesphere-community/images/openfunction0113-live-cover.png + type: iframe + createTime: 2022.01.13 + group: 直播回放 + + - title: MQTT 及车联网场景应用 + link: ./mqtt1230-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/mqtt1230-live-cover.png + type: iframe + createTime: 2021.12.30 + group: 直播回放 + + - title: 函数计算应用场景探讨及 FaaS 设计和实现 + link: ./faas1223-live/ + snapshot: 
https://pek3b.qingstor.com/kubesphere-community/images/faas1223-live-cover.png + type: iframe + createTime: 2021.12.23 + group: 直播回放 + + - title: 携程分布式存储实践 + link: ./ceph1216-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/ceph1216-live-cover.png + type: iframe + createTime: 2021.12.16 + group: 直播回放 + + - title: KubeKey v2.0.0 上手指南 + link: ./kubekey1209-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/kubekey1209-live-cover.png + type: iframe + createTime: 2021.12.09 + group: 直播回放 + + - title: 浅谈 Webhook 开发与实践 + link: ./webhook1202-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/webhook1202-live-cover.png + type: iframe + createTime: 2021.12.02 + group: 直播回放 + + - title: ROOK 云原生分布式存储开源项目的介绍及其在企业中的应用未来 + link: ./rook1111-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/rook1111-live-cover.png + type: iframe + createTime: 2021.11.11 + group: 直播回放 + + - title: 海量并发微服务框架设计 + link: ./go1104-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/go1104-live-cover.png + type: iframe + createTime: 2021.11.04 + group: 直播回放 + + - title: 如何优化容器网络性能 + link: ./kubeovn1028-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/kubeovn1028-live-cover.png + type: iframe + createTime: 2021.10.28 + group: 直播回放 + + - title: JuiceFS CSI Driver 的最佳实践 + link: ./hangzhou1023-juicefs/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/hangzhou1023-juicefs-cover.png + type: iframe + createTime: 2021.10.23 + group: Meetup + + - title: KubeSphere DevOps 越开放,越强大 + link: ./hangzhou1023-devops/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/hangzhou1023-devops-cover.png + type: iframe + createTime: 2021.10.23 + group: Meetup + + - title: 集群镜像重塑分布式应用交付 + link: ./hangzhou1023-sealer/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/hangzhou1023-sealer-cover.png + type: iframe + createTime: 2021.10.23 + group: 
Meetup + + - title: 新一代高可用 MySQL K8s Operator 源码解析 + link: ./hangzhou1023-mysql/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/hangzhou1023-mysql-cover.png + type: iframe + createTime: 2021.10.23 + group: Meetup + + - title: 云原生区块链探索之路 + link: ./hangzhou1023-blockchain/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/hangzhou1023-blockchain-cover.png + type: iframe + createTime: 2021.10.23 + group: Meetup + + - title: CKA/CKS 备考攻略 + link: ./ckacks1021-live/ + snapshot: https://pek3b.qingstor.com/kubesphere-community/images/ckacks1021-live-cover.png + type: iframe + createTime: 2021.10.21 + group: 直播回放 + - title: Kubebuilder 使用简介 link: ./uisee0923-live/ snapshot: https://pek3b.qingstor.com/kubesphere-community/images/uisee0923-live-cover.png @@ -337,38 +442,31 @@ section3: section4: overImg: /images/live/over.svg noticeImg: /images/live/notice.svg + title: Meetup 回顾 list: - - title: Apache APISIX Ingress Controller 实现与上手实践 - date: 08/26 - time: 20:00 - 21:00 - lastTime: 2021-08-26T21:00:00Z - url: ./apisix826-live/ + - year: 2021 + meetup: - - title: 使用 Flomesh 进行 Dubbo 微服务的服务治理 - date: 08/19 - time: 20:00 - 21:00 - lastTime: 2021-08-19T21:00:00Z - url: ./pipy819-live/ - - - title: KubeSphere v3.1 开源社区交流会直播回放 - date: 04/29 - time: 20:00 - 21:00 - lastTime: 2021-04-29T21:00:00Z - url: ./3.1-live/ - - - title: 基于 KubeSphere 与 BotKube 搭建 K8s 多集群监控告警体系 - date: 01/14 - time: 20:00 - 21:00 - lastTime: 2021-01-14T21:00:00Z - url: ./botkube-live/ - - - title: 企业级云原生多租户通知系统 Notification Manager - date: 01/06 - time: 20:00 - 21:00 - lastTime: 2021-01-06T21:00:00Z - url: ./nm-live/ + - place: 杭州站(10.23) + img: https://pek3b.qingstor.com/kubesphere-community/images/meetup-hangzhou1023-cover.png + meetupUrl: https://kubesphere.com.cn/live/meetup-hangzhou1023/ + + - place: 北京站(7.29) + img: https://pek3b.qingstor.com/kubesphere-community/images/meetup-beijing-cover.png + meetupUrl: https://kubesphere.com.cn/live/meetup-beijing/ + + - place: 
成都站(6.19) + img: https://pek3b.qingstor.com/kubesphere-community/images/meetup-chengdu-cover.png + meetupUrl: https://kubesphere.com.cn/live/meetup-chengdu/ + - place: 杭州站(5.29) + img: https://pek3b.qingstor.com/kubesphere-community/images/meetup-hangzhou-cover.png + meetupUrl: https://kubesphere.com.cn/live/meetup-hangzhou/ + - place: 上海站(5.15) + img: https://pek3b.qingstor.com/kubesphere-community/images/meetup-shanghai-cover.png + meetupUrl: https://kubesphere.com.cn/live/meetup-shanghai/ + section5: title: 分享您的主题 content: 您是否也想在社区分享您的云原生实践经验?即可加入 KubeSphere 社区云原生直播计划,提交分享主题,将有 KubeSphere 周边礼品相送!最重要的是您可以:提升自身软实力,如演讲能力、总结能力;提高自身在云原生领域的知名度;展现企业的技术实力及优秀想法;帮助推广您社区开源的项目。 diff --git a/content/zh/live/ai-cic.md b/content/zh/live/ai-cic.md index e23f8b416..c86270a6f 100644 --- a/content/zh/live/ai-cic.md +++ b/content/zh/live/ai-cic.md @@ -29,5 +29,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 diff --git a/content/zh/live/apisix-chengdu.md b/content/zh/live/apisix-chengdu.md index 7c765a3bc..874da6f57 100644 --- a/content/zh/live/apisix-chengdu.md +++ b/content/zh/live/apisix-chengdu.md @@ -31,5 +31,5 @@ Apache APISIX 是一个高性能、全动态的云原生 API 网关,因其优 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 diff --git a/content/zh/live/apisix826-live.md b/content/zh/live/apisix826-live.md index 533b1c1cf..819828378 100644 --- a/content/zh/live/apisix826-live.md +++ b/content/zh/live/apisix826-live.md @@ -38,4 +38,4 @@ B 站 http://live.bilibili.com/22580654 ## PPT 下载 -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 `2021826` 即可下载 PPT。 \ No newline at end of file +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `2021826` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/baas-chengdu.md b/content/zh/live/baas-chengdu.md index 512295c18..64818bde4 100644 --- a/content/zh/live/baas-chengdu.md +++ 
b/content/zh/live/baas-chengdu.md @@ -30,5 +30,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 diff --git a/content/zh/live/botkube-live.md b/content/zh/live/botkube-live.md index a572908e8..bde317bec 100644 --- a/content/zh/live/botkube-live.md +++ b/content/zh/live/botkube-live.md @@ -28,4 +28,4 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 0114 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 0114 即可下载 PPT。 diff --git a/content/zh/live/ceph1216-live.md b/content/zh/live/ceph1216-live.md new file mode 100644 index 000000000..0ac67a154 --- /dev/null +++ b/content/zh/live/ceph1216-live.md @@ -0,0 +1,44 @@ +--- +title: 携程分布式存储实践 +description: 本次直播将分享携程近几年 Ceph 的发展历程,监控告警,相关实践,io_uring 使用测试,及混合云架构探索。 +keywords: KubeSphere, Kubernetes, Ceph +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=464989966&bvid=BV1DL411j7wV&cid=462915303&page=1&high_quality=1 + type: iframe + time: 2021-12-16 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +在海量数据时代,面向应用程序和用户的数据存储规模在逐渐扩大。数据量不断增长,也驱动着我们寻找更好的方法来满足用户需求、保护自身的数据。Ceph 在携程数据存储中发展中扮演了不可或缺的角色。 + +本次直播将分享携程近几年 Ceph 的发展历程,监控告警,相关实践,io_uring 使用测试,及混合云架构探索。 + +## 讲师简介 + +张搏航,高级软件工程师。 + +个人简介: +张搏航,高级软件工程师。2016 年加入携程,目前就职于系统研发团队。主要负责分布式存储架构调优,性能优化和大数据相关运维工作。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/ceph1216-live.png) + +## 直播时间 + +2021 年 12 月 16 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211216` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/ckacks1021-live.md b/content/zh/live/ckacks1021-live.md new file mode 100644 index 000000000..2bb7bdf76 --- /dev/null +++ b/content/zh/live/ckacks1021-live.md @@ -0,0 +1,41 @@ +--- +title: CKA/CKS 备考攻略 +description: CKA/CKS 是 CNCF 官方 Kubernetes 
认证考试,本次主要分享参加 CKA/CKS 认证考试时的备考经验及心得体会,希望能够帮助想要了解和计划考取认证的同学快速了解考试,掌握备考方法,轻松获取认证。 +keywords: KubeSphere, Kubernetes, CKA, CKS +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=976126102&bvid=BV1Z44y1x7e3&cid=428904630&page=1&high_quality=1 + type: iframe + time: 2021-10-21 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +CKA/CKS 是 CNCF 官方 Kubernetes 认证考试,本次主要分享参加 CKA/CKS 认证考试时的备考经验及心得体会,希望能够帮助想要了解和计划考取认证的同学快速了解考试,掌握备考方法,轻松获取认证。 + +## 讲师简介 + +郭峰,KubeSphere 研发工程师 + +个人简介: +KubeSphere 研发工程师,openEuler 社区 Cloud Native SIG Maintainer,CKA/CKS 认证的持有者。 + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/ckacks1021-live.png) + +## 直播时间 + +2021 年 10 月 21 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211021` 即可下载 PPT。 diff --git a/content/zh/live/devops-shanghai.md b/content/zh/live/devops-shanghai.md index a21db5cfd..af2f7a4f0 100644 --- a/content/zh/live/devops-shanghai.md +++ b/content/zh/live/devops-shanghai.md @@ -30,7 +30,7 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/live/driverless-cic.md b/content/zh/live/driverless-cic.md index b20a81833..0fadee21f 100644 --- a/content/zh/live/driverless-cic.md +++ b/content/zh/live/driverless-cic.md @@ -31,5 +31,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 diff --git a/content/zh/live/edgebox-cic.md b/content/zh/live/edgebox-cic.md index 6491efcef..5681c12c1 100644 --- a/content/zh/live/edgebox-cic.md +++ b/content/zh/live/edgebox-cic.md @@ -29,5 +29,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 diff --git 
a/content/zh/live/faas-hangzhou.md b/content/zh/live/faas-hangzhou.md index d2e433fe3..cc11ecf2d 100644 --- a/content/zh/live/faas-hangzhou.md +++ b/content/zh/live/faas-hangzhou.md @@ -32,4 +32,4 @@ OpenFunction 项目发起人,KubeSphere 可观测性、边缘计算相关产 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/faas1223-live.md b/content/zh/live/faas1223-live.md new file mode 100644 index 000000000..8f7c8906f --- /dev/null +++ b/content/zh/live/faas1223-live.md @@ -0,0 +1,42 @@ +--- +title: 函数计算应用场景探讨及 FaaS 设计和实现 +description: 从社区开源的 Knative、OpenFaaS、OpenFunction 入手 到 Lambda、Cloud Run 等商业产品,讲解 FaaS 的使用和经典设计,抛砖引玉介绍"旷视"内部的 FaaS 设计和实现。 +keywords: KubeSphere, Kubernetes, FaaS, OpenFunction, Knative, OpenFaaS +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=935107557&bvid=BV1DT4y1f7TG&cid=467466159&page=1&high_quality=1 + type: iframe + time: 2021-12-23 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +FaaS 是什么,为什么需要了解 FaaS,以及 FaaS 如何实现,我们怎么用?从社区开源的 Knative、OpenFaaS、OpenFunction 入手 到 Lambda、Cloud Run 等商业产品,讲解 FaaS 的使用和经典设计,抛砖引玉介绍"旷视"内部的 FaaS 设计和实现。 + +## 讲师简介 + +王续,旷视科技资深工程师 + +个人简介: +王续,旷视科技资深工程师。专注于云原生,负责公司内 PaaS、FaaS 平台的设计、实现。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/faas1223-live.png) + +## 直播时间 + +2021 年 12 月 23 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211223` 即可下载 PPT。 diff --git a/content/zh/live/go1104-live.md b/content/zh/live/go1104-live.md new file mode 100644 index 000000000..e9209bfd8 --- /dev/null +++ b/content/zh/live/go1104-live.md @@ -0,0 +1,55 @@ +--- +title: 海量并发微服务框架设计 +description: 本次分享将详细讲解微服务治理的工程实践和研发效率工具的建设。通过这次分享,你可以从整体上理解一个高并发的微服务框架的设计思路,并且可以把很多工程实践思路带入到平时工作中,从而得到更好的服务治理能力和更高的开发效率。 +keywords: KubeSphere, Kubernetes, Microservice 
+css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=633965485&bvid=BV1Gb4y187un&cid=436661201&page=1&high_quality=1 + type: iframe + time: 2021-11-04 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +本次分享将从以下几个方面详细讲解微服务治理的工程实践和研发效率工具的建设: +- 微服务如何拆分数据 +- K8s 内外的服务发现机制 +- 自适应负载均衡 +- 如何应对突发流量尖峰 +- 海量并发缓存如何设计 +- 微服务可观测性 +- 极致的效率工具建设 + +通过这次分享,你可以从整体上理解一个高并发的微服务框架的设计思路,并且可以把很多工程实践思路带入到平时工作中,从而得到更好的服务治理能力和更高的开发效率。 + +## 讲师简介 + +万俊峰,go-zero 作者 + +个人简介: +万俊峰毕业后先后就职于两家美企,从事高性能计算和互联网后端研发工作。2007 年开始合伙创业,并任职 CTO,他拥有 11 年的社交 App 创业和并购经验。之后于 2018 年加入晓黑板,担任 CTO。 +他热爱开源技术,拥有 20 年的开发、微服务架构经验以及十多年的技术团队管理经验。在微服务架构、分布式系统、机器学习模型上有深入研究。 + +他还是持续学习者,主张系统和架构尽可能保持简单,工具大于文档和约定的理念。他经常参加业界的技术大会,是 ArchSummit 全球架构师峰会的明星讲师,GopherChina 大会的主持人和金牌讲师,QCon+ Go 专题出品人&讲师,也是腾讯云开发者大会的讲师。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/go1104-live.png) + +## 直播时间 + +2021 年 11 月 04 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211104` 即可下载 PPT。 + diff --git a/content/zh/live/hangzhou1023-blockchain.md b/content/zh/live/hangzhou1023-blockchain.md new file mode 100644 index 000000000..e879a5fad --- /dev/null +++ b/content/zh/live/hangzhou1023-blockchain.md @@ -0,0 +1,29 @@ +--- +title: 云原生区块链探索之路 +description: 联盟链与云原生在技术方面有许多的相似之处,也有很多互补的地方。自去年提出云原生区块链的概念之后,我们在两者结合方面做了一些尝试。本次分享,会带大家回顾一下我们的思路,介绍我们最近的进展,以及遇到的问题,最后展望未来的发展路线。 +keywords: KubeSphere,Kubernetes,云原生,Blockchain +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=633793789&bvid=BV1Sb4y1h7eb&cid=430844218&page=1&high_quality=1 + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 线下 + 线上 + baseIcon: /images/live/base.svg +--- + +## 分享人简介 + +宁志伟 + +溪塔科技,首席架构师 + +## 分享主题介绍 + 
+联盟链与云原生在技术方面有许多的相似之处,也有很多互补的地方。自去年提出云原生区块链的概念之后,我们在两者结合方面做了一些尝试。本次分享,会带大家回顾一下我们的思路,介绍我们最近的进展,以及遇到的问题,最后展望未来的发展路线。 + +## 下载 PPT + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “云原生杭州1023” 即可下载 PPT。 diff --git a/content/zh/live/hangzhou1023-devops.md b/content/zh/live/hangzhou1023-devops.md new file mode 100644 index 000000000..85a5308f1 --- /dev/null +++ b/content/zh/live/hangzhou1023-devops.md @@ -0,0 +1,29 @@ +--- +title: KubeSphere DevOps 越开放,越强大 +description: 开源社区只有保持开放和中立的态度,才能越来越强大,才能像大海一样海纳百川。KubeSphere 社区将这两点做到了极致,不仅开放了源代码,还公开了社区的各项决策事宜,所有社区用户都可以参与推动社区的发展;同时 KubeSphere DevOps 项目接下来会和 KubeSphere 解耦,将后端项目从 KubeSphere 中完全抽离,使 KubeSphere DevOps 的适用性更广。最终社区、项目、贡献者以及下游用户都会越来越强大! +keywords: KubeSphere,Kubernetes,DevOps,云原生 +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=763750235&bvid=BV1Rr4y1y7wF&cid=430838015&page=1&high_quality=1 + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 线下 + 线上 + baseIcon: /images/live/base.svg +--- + +## 分享人简介 + +Rick + +青云科技,KubeSphere 研发工程师 + +## 分享主题介绍 + +开源社区只有保持开放和中立的态度,才能越来越强大,才能像大海一样海纳百川。KubeSphere 社区将这两点做到了极致,不仅开放了源代码,还公开了社区的各项决策事宜,所有社区用户都可以参与推动社区的发展;同时 KubeSphere DevOps 项目接下来会和 KubeSphere 解耦,将后端项目从 KubeSphere 中完全抽离,使 KubeSphere DevOps 的适用性更广。最终社区、项目、贡献者以及下游用户都会越来越强大! 
+ +## 下载 PPT + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “云原生杭州1023” 即可下载 PPT。 diff --git a/content/zh/live/hangzhou1023-juicefs.md b/content/zh/live/hangzhou1023-juicefs.md new file mode 100644 index 000000000..eb2b33ed3 --- /dev/null +++ b/content/zh/live/hangzhou1023-juicefs.md @@ -0,0 +1,29 @@ +--- +title: JuiceFS CSI Driver 的最佳实践 +description: JuiceFS 基于云原生环境的基础设施和软件设计,可以简单轻松的在各种云原生环境中部署,包括私有云、混合云以及公有云托管的 Kubernetes 环境中。JuiceFS CSI Driver 面对真实的业务挑战,实现了架构的全面升级。本次主要介绍架构升级中遇到的挑战、全新的架构设计以及带来的收益。 +keywords: KubeSphere,Kubernetes,JuiceFS,云原生存储 +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=676257371&bvid=BV1ZU4y1F7kK&cid=430831516&page=1&high_quality=1 + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 线下 + 线上 + baseIcon: /images/live/base.svg +--- + +## 分享人简介 + +朱唯唯 + +Juicedata,全栈工程师 + +## 分享主题介绍 + +JuiceFS 基于云原生环境的基础设施和软件设计,可以简单轻松的在各种云原生环境中部署,包括私有云、混合云以及公有云托管的 Kubernetes 环境中。JuiceFS CSI Driver 面对真实的业务挑战,实现了架构的全面升级。本次主要介绍架构升级中遇到的挑战、全新的架构设计以及带来的收益。 + +## 下载 PPT + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “云原生杭州1023” 即可下载 PPT。 diff --git a/content/zh/live/hangzhou1023-mysql.md b/content/zh/live/hangzhou1023-mysql.md new file mode 100644 index 000000000..617c95692 --- /dev/null +++ b/content/zh/live/hangzhou1023-mysql.md @@ -0,0 +1,29 @@ +--- +title: 新一代高可用 MySQL K8s Operator 源码解析 +description: 本次分享为大家介绍高可用 RadonDB MySQL 容器化项目。从该项目的 operator 工程脚手架选型,CRD 、 controller ,sidecar 几个视角展开源码解析。 +keywords: KubeSphere,Kubernetes,MySQL,operator +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=421345772&bvid=BV1j3411k7ye&cid=431626676&page=1&high_quality=1 + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 线下 + 线上 + baseIcon: /images/live/base.svg +--- + +## 分享人简介 + +高日耀 + +青云科技,RadonDB 资深数据库内核研发 + +## 分享主题介绍 + +本次分享为大家介绍高可用 RadonDB MySQL 容器化项目。从该项目的 operator 工程脚手架选型,CRD 、 controller ,sidecar 
几个视角展开源码解析。 + +## 下载 PPT + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “云原生杭州1023” 即可下载 PPT。 diff --git a/content/zh/live/hangzhou1023-sealer.md b/content/zh/live/hangzhou1023-sealer.md new file mode 100644 index 000000000..b0a4121f2 --- /dev/null +++ b/content/zh/live/hangzhou1023-sealer.md @@ -0,0 +1,29 @@ +--- +title: 集群镜像重塑分布式应用交付 +description: 以行业 ISV 为例,集群镜像帮助企业解决了分布式软件的部署一致性难题、降低了交付出错率,最终指数级降低分布式软件的交付成本。受 Docker 等容器技术的启发,集群镜像将单机应用封装技术,上升到分布式集群维度,最终实现分布式软件的高效交付(build, share, run)。 +keywords: KubeSphere,Kubernetes,集群镜像,云原生 +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=633860808&bvid=BV1Gb4y1a7J8&cid=430840894&page=1&high_quality=1 + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 线下 + 线上 + baseIcon: /images/live/base.svg +--- + +## 分享人简介 + +方海涛 + +阿里云技术专家,Sealer 项目发起人 + +## 分享主题介绍 + +集群镜像把整个集群看成一台服务器,把 K8s 看成云操作系统,实现整个集群的镜像化打包和交付,为企业级软件提供一种“开箱即用”的应用封装技术。以行业 ISV 为例,集群镜像帮助企业解决了分布式软件的部署一致性难题、降低了交付出错率,最终指数级降低分布式软件的交付成本。受 Docker 等容器技术的启发,集群镜像将单机应用封装技术,上升到分布式集群维度,最终实现分布式软件的高效交付(build, share, run)。 + +## 下载 PPT + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “云原生杭州1023” 即可下载 PPT。 diff --git a/content/zh/live/hpa-chengdu.md b/content/zh/live/hpa-chengdu.md index 2d4d96909..32dcc207e 100644 --- a/content/zh/live/hpa-chengdu.md +++ b/content/zh/live/hpa-chengdu.md @@ -29,5 +29,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 diff --git a/content/zh/live/kubeedge-hangzhou.md b/content/zh/live/kubeedge-hangzhou.md index 7e9493d24..adf782141 100644 --- a/content/zh/live/kubeedge-hangzhou.md +++ b/content/zh/live/kubeedge-hangzhou.md @@ -42,4 +42,4 @@ OpenFunction 项目发起人,KubeSphere 可观测性、边缘计算相关产 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/kubekey1209-live.md 
李耀宗,目前就职于青云科技公司容器研发部,开源爱好者,KubeKey 维护者,Installation SIG 成员。目前主要负责 KubeKey v2.0.0 相关开发工作。
作为新兴的容器网络工具被引入了测试集合。 + +今年我们看到用户越来越关注容器网络的性能,因此,我们尝试了多种性能优化方案,包括优化传输时延和传输带宽。这次分享会介绍如何从 ovs、内核、协议等多个角度去优化 Kube-OVN 的过程。 + +## 讲师简介 + +刘韬,Kube-OVN 社区 Maintainer,灵雀云资深工程师, + +个人简介: +长期关注 SDN、网络虚拟化及网络性能优化。目前工作重点是扩展 Kube-OVN 的功能,包括基于 Kube-OVN 打通 OpenStack 和 K8s 的网络、Cilium 部分功能引入和 Kube-OVN 容器网络性能优化等。 + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/kubeovn1028-live.png) + +## 直播时间 + +2021 年 10 月 28 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211028` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/medialive-shanghai.md b/content/zh/live/medialive-shanghai.md index 25b149ec3..b2ca93b08 100644 --- a/content/zh/live/medialive-shanghai.md +++ b/content/zh/live/medialive-shanghai.md @@ -30,7 +30,7 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/live/meetup-beijing.md b/content/zh/live/meetup-beijing.md index 916573fd7..c5572ede2 100644 --- a/content/zh/live/meetup-beijing.md +++ b/content/zh/live/meetup-beijing.md @@ -13,6 +13,8 @@ section1: base: 北京市东城区建国门内大街 9 号北京国际饭店 + 线上同步直播 baseIcon: /images/live/base.svg --- +![](https://pek3b.qingstor.com/kubesphere-community/images/cic-group-20210729.jpeg) +
    KubeSphere and Friends 2021
    Kubernetes and Cloud Native Meetup
    diff --git a/content/zh/live/meetup-chengdu.md b/content/zh/live/meetup-chengdu.md index 40044eb45..a4c1b684a 100644 --- a/content/zh/live/meetup-chengdu.md +++ b/content/zh/live/meetup-chengdu.md @@ -13,6 +13,8 @@ section1: base: 四川省成都市高新区天府大道中段 500 号天祥广场 B 座 45A + 线上同步直播 baseIcon: /images/live/base.svg --- +![](https://pek3b.qingstor.com/kubesphere-community/images/meetup-chengdu-20210619.jpg) +
    KubeSphere and Friends 2021
    Kubernetes and Cloud Native Meetup
    diff --git a/content/zh/live/meetup-hangzhou.md b/content/zh/live/meetup-hangzhou.md index 9c7a23f62..b9e0c6efe 100644 --- a/content/zh/live/meetup-hangzhou.md +++ b/content/zh/live/meetup-hangzhou.md @@ -13,6 +13,8 @@ section1: base: 浙江省杭州市拱墅区丰潭路 430 号丰元国际大厦 A 座硬趣空间地下一层 + 线上同步直播 baseIcon: /images/live/base.svg --- +![](https://pek3b.qingstor.com/kubesphere-community/images/meetup-hangzhou-20210529.jpeg) +
    KubeSphere and Friends 2021
    Kubernetes and Cloud Native Meetup
    diff --git a/content/zh/live/meetup-hangzhou1023.md b/content/zh/live/meetup-hangzhou1023.md new file mode 100644 index 000000000..0f278283a --- /dev/null +++ b/content/zh/live/meetup-hangzhou1023.md @@ -0,0 +1,86 @@ +--- +title: 云原生技术交流 Meetup 杭州站 +description: 云原生技术交流 Meetup 杭州站,由 KubeSphere 社区杭州用户委员会发起和主办,KubeSphere 社区和 JuiceFS 社区联合主办,阿里云赞助,取得圆满成功。围绕“云原生存储、DevOps、集群镜像、云原生数据库、云原生区块链”等话题,来自 JuiceFS 社区、KubeSphere 社区、阿里云、RadonDB 社区以及溪塔科技的技术大牛和嘉宾带来了新的实践和思考。 +keywords: KubeSphere,Meetup,Hangzhou,JuiceFS,Kubernetes,DevOps,cluster,云原生,区块链 +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: + type: iframe + time: 2021-10-23 14:00-18:00 + timeIcon: /images/live/clock.svg + base: 浙江杭州市西湖区黄龙时代广场 B 座 14F 东 527 + 线上同步直播 + baseIcon: /images/live/base.svg +--- +![](https://pek3b.qingstor.com/kubesphere-community/images/meetup-hangzhou-20211023.jpeg) + +
    云原生技术交流 Meetup 杭州站
    + +
    由 KubeSphere 社区杭州用户委员会发起和主办
    + +
    KubeSphere 社区和 JuiceFS 社区联合主办,阿里云赞助
    + +
    取得圆满成功 🎉🎉🎉
    + +
    围绕“云原生存储、DevOps、集群镜像、云原生数据库、云原生区块链”等话题
    + +
    来自 JuiceFS 社区、KubeSphere 社区、阿里云、RadonDB 社区以及溪塔科技的技术大牛和嘉宾
    + +
    带来了新的实践和思考
    + +
    一起来看看
    + +
个人介绍:溪塔科技,首席架构师
    KubeSphere and Friends 2021
    Kubernetes and Cloud Native Meetup
    diff --git a/content/zh/live/milvus-hangzhou.md b/content/zh/live/milvus-hangzhou.md index 788077e03..757cd950d 100644 --- a/content/zh/live/milvus-hangzhou.md +++ b/content/zh/live/milvus-hangzhou.md @@ -30,5 +30,5 @@ Zilliz 工程总监 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/mqtt1230-live.md b/content/zh/live/mqtt1230-live.md new file mode 100644 index 000000000..63b680e36 --- /dev/null +++ b/content/zh/live/mqtt1230-live.md @@ -0,0 +1,44 @@ +--- +title: MQTT 及车联网场景应用 +description: 本次分享期望能够让⼤家更深⼊了解物联⽹通信协议 MQTT:⾸先介绍 MQTT 协议基础;然后通过对⽐ MQTT Broker 和消息队列的异同点,并介绍一个云原生 MQTT Broker:EMQ X;最后和⼤家分享 MQTT 协议及 EMQ X 在⻋联⽹场景下的应⽤。 +keywords: KubeSphere, Kubernetes, MQTT, EMQ X, MQTT Broker, 车联网 +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=422780369&bvid=BV133411i7JM&cid=473091163&page=1&high_quality=1 + type: iframe + time: 2021-12-30 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +本次分享期望能够让⼤家更深⼊了解物联⽹通信协议 MQTT:⾸先介绍 MQTT 协议基础;然后通过对⽐ MQTT Broker 和消息队列的异同点,并介绍一个云原生 MQTT Broker:EMQ X;最后和⼤家分享 MQTT 协议及 EMQ X 在⻋联⽹场景下的应⽤。 + +## 讲师简介 + +郭祖龙,驭势科技云脑架构研发经理 + +个人简介: +郭祖龙,驭势科技云脑架构研发经理。加⼊驭势科技整整三年,主要负责运营业务、⻋云通信架构等。云原⽣爱好者,期望能够充分享⽤云原⽣红利⾼效提升业务功能开发能⼒。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/mqtt1230-live.png) + +## 直播时间 + +2021 年 12 月 30 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211230` 即可下载 PPT。 + + diff --git a/content/zh/live/multicluster-chengdu.md b/content/zh/live/multicluster-chengdu.md index a8bbccf95..93b681669 100644 --- a/content/zh/live/multicluster-chengdu.md +++ b/content/zh/live/multicluster-chengdu.md @@ -30,5 +30,5 @@ Kubernetes 生态已经成为云原生事实标准,在万物上云的时代必 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 +可扫描官网底部二维码,关注 
「KubeSphere云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 diff --git a/content/zh/live/multicluster-shanghai.md b/content/zh/live/multicluster-shanghai.md index f058697aa..8cf238926 100644 --- a/content/zh/live/multicluster-shanghai.md +++ b/content/zh/live/multicluster-shanghai.md @@ -30,6 +30,6 @@ KubeSphere 平台研发工程师, contributor of kubernetes-sigs, 目前负责 K ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/live/mysql-hangzhou.md b/content/zh/live/mysql-hangzhou.md index a5e7eada5..81a0841b7 100644 --- a/content/zh/live/mysql-hangzhou.md +++ b/content/zh/live/mysql-hangzhou.md @@ -30,6 +30,6 @@ RadonDB Orgnization 发起者,长期从事分布式数据库内核研发,喜 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/mysql-shanghai.md b/content/zh/live/mysql-shanghai.md index 72b00bb46..b65230f13 100644 --- a/content/zh/live/mysql-shanghai.md +++ b/content/zh/live/mysql-shanghai.md @@ -30,7 +30,7 @@ RadonDB Orgnization 发起者,长期从事分布式数据库内核研发,喜 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/live/nebula0902-live.md b/content/zh/live/nebula0902-live.md index 4b30f5538..02133b35d 100644 --- a/content/zh/live/nebula0902-live.md +++ b/content/zh/live/nebula0902-live.md @@ -39,4 +39,4 @@ B 站 http://live.bilibili.com/22580654 ## PPT 下载 -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 `20210902` 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20210902` 即可下载 PPT。 diff --git a/content/zh/live/nebulagraph-hangzhou.md b/content/zh/live/nebulagraph-hangzhou.md index 619b498c8..014eca54f 100644 --- a/content/zh/live/nebulagraph-hangzhou.md +++ b/content/zh/live/nebulagraph-hangzhou.md @@ -32,4 +32,4 @@ vesoft 为开源的分布式图数据库 Nebula Graph 提供了配套的 DBaaS ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 
+可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/neonio-shanghai.md b/content/zh/live/neonio-shanghai.md index fd070f207..b0811f0a0 100644 --- a/content/zh/live/neonio-shanghai.md +++ b/content/zh/live/neonio-shanghai.md @@ -30,7 +30,7 @@ QingStor 高级软件工程师 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/live/nm-cic.md b/content/zh/live/nm-cic.md index 2057310ad..8dc5244b2 100644 --- a/content/zh/live/nm-cic.md +++ b/content/zh/live/nm-cic.md @@ -31,5 +31,5 @@ KubeSphere 可观测性研发工程师 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 北京” 即可下载 PPT。 diff --git a/content/zh/live/openfunction0113-live.md b/content/zh/live/openfunction0113-live.md new file mode 100644 index 000000000..a617b51e6 --- /dev/null +++ b/content/zh/live/openfunction0113-live.md @@ -0,0 +1,48 @@ +--- +title: OpenFunction v0.5.0 新特性讲解与 v0.6.0 展望 +description: 本次分享将整体介绍 OpenFunction,也会讲解 v0.5.0 的新特性以及 v0.6.0 的展望。 +keywords: KubeSphere, Kubernetes, Serverless, FaaS, 函数计算, OpenFunction +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=850694353&bvid=BV1tL4y147yY&cid=483897885&page=1&high_quality=1 + type: iframe + time: 2022-01-13 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +将业务转变为 Serverless 计算模式正逐渐被越来越多的用户所接受,而依托于 Kubernetes 迅速发展起来的云原生生态圈也加速了这一过程。Serverless 可以拆解为两个部分:BaaS 和 FaaS。其中的 FaaS —— 也就是“函数即服务”部分相较于“后端即服务” BaaS 而言,更具中立、多样的特质,因此开源 FaaS 项目应运而生。 + +今天,云原生 Serverless 领域的技术栈突飞猛进,已孵化出 Dapr、KEDA、Knative 等等优秀的开源项目。这驱使 KubeSphere 团队去寻求一种新的 FaaS 平台解决方案 —— 更多样的函数构建方式、更多样的函数调用类型、更多样的弹性伸缩指标以及更完整的一站式服务能力。 + +于是 KubeSphere 团队在设计中大胆地引入了最前沿的技术栈,借助开源社区的力量打造新一代的开源函数计算平台 —— OpenFunction。 本次分享将整体介绍 OpenFunction,也会讲解 v0.5.0 的新特性以及 v0.6.0 的展望。 + +## 讲师简介 + +方阗,KubeSphere 研发工程师,OpenFunction 
雷万钧,KubeSphere 可观测性研发工程师,OpenFunction Maintainer,云原生爱好者。
//player.bilibili.com/player.html?aid=421734665&bvid=BV1D3411873Z&cid=442118347&page=1&high_quality=1 + type: iframe + time: 2021-11-11 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +随着云原生技术在企业中的深入广泛应用,越来越多的场景对于数据存储的需求日渐增长,Ceph 作为开源世界最为成功的分布式存储项目之一,其悠久的历史和技术沉淀为我们带来了无限遐想空间。今天,Ceph 的云原生化开源项目-ROOK,其正是将 Ceph 技术通过 K8s 现代化的例子,我们对此将展开探讨,探究其发展历程、分布式存储技术在云原生环境下的解决方案,及在云原生世界的未来更为广阔的应用场景。 + +## 讲师简介 + +林文炜,RedHat 解决方案架构师 + +个人简介: +林文炜,目前就职于红帽软件中国区技术团队,负责企业级合作伙伴及开源社区技术生态的建设工作。 + +历史工作经历:VMware、Citrix 虚拟化、OpenStack 私有云。 + +当前专注领域:以 Kubernetes 为中心的云原生混合云基础架构。 + +主要负责的解决方案包括:K8s 容器云平台、云原生分布式存储 ROOK 和云原生虚拟化 kubevirt 等开源项目的企业级应用。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/rook1111-live.png) + +## 直播时间 + +2021 年 11 月 11 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + +## PPT 下载 + +可扫描官网底部二维码,关注「KubeSphere云原生」公众号,后台回复 `20211111` 即可下载 PPT。 diff --git a/content/zh/live/segmentfault.md b/content/zh/live/segmentfault.md index f9ae20c10..67759caee 100644 --- a/content/zh/live/segmentfault.md +++ b/content/zh/live/segmentfault.md @@ -30,5 +30,5 @@ SegmentFault(思否)是目前中文领域最大的编程问答交流社区 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 杭州” 即可下载 PPT。 diff --git a/content/zh/live/tideng-chengdu.md b/content/zh/live/tideng-chengdu.md index 1ba52af4f..20d10209c 100644 --- a/content/zh/live/tideng-chengdu.md +++ b/content/zh/live/tideng-chengdu.md @@ -30,5 +30,5 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 成都” 即可下载 PPT。 diff --git a/content/zh/live/uisee0916-live.md b/content/zh/live/uisee0916-live.md index 9ed60afca..9aaca8c3a 100644 --- a/content/zh/live/uisee0916-live.md +++ b/content/zh/live/uisee0916-live.md @@ -38,4 +38,4 @@ B 站 http://live.bilibili.com/22580654 ## PPT 下载 -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 `20210916` 即可下载 PPT。 
+可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20210916` 即可下载 PPT。 diff --git a/content/zh/live/uisee0923-live.md b/content/zh/live/uisee0923-live.md index 32fdb5506..a5d6268ce 100644 --- a/content/zh/live/uisee0923-live.md +++ b/content/zh/live/uisee0923-live.md @@ -1,6 +1,6 @@ --- title: Kubebuilder 使用简介 -description: Kubebuilder 是一个帮助快速开发自定义资源及控制器的框架工具。和大家一起走进 Kubebuilder,初步了解 Kubebuilder 的基本原理以及使用方法。 +description: Kubebuilder 是一个帮助快速开发定制资源及控制器的框架工具。和大家一起走进 Kubebuilder,初步了解 Kubebuilder 的基本原理以及使用方法。 keywords: KubeSphere, Kubernetes, Kuberbuilder css: scss/live-detail.scss @@ -15,7 +15,7 @@ section1: --- ## 分享内容简介 -Kubebuilder 是一个帮助快速开发自定义资源及控制器的框架工具。本次分享基于上一讲 [Kubernetes 控制器原理简介](https://kubesphere.com.cn/live/uisee0916-live/),和大家一起走进 Kubebuilder,初步了解 Kubebuilder 的基本原理以及使用方法。 +Kubebuilder 是一个帮助快速开发定制资源及控制器的框架工具。本次分享基于上一讲 [Kubernetes 控制器原理简介](https://kubesphere.com.cn/live/uisee0916-live/),和大家一起走进 Kubebuilder,初步了解 Kubebuilder 的基本原理以及使用方法。 ## 讲师简介 @@ -38,4 +38,4 @@ B 站 http://live.bilibili.com/22580654 ## PPT 下载 -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 `20210923` 即可下载 PPT。 \ No newline at end of file +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20210923` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/wasm0909-live.md b/content/zh/live/wasm0909-live.md index ae84fb2bc..42dbf4867 100644 --- a/content/zh/live/wasm0909-live.md +++ b/content/zh/live/wasm0909-live.md @@ -40,4 +40,4 @@ B 站 http://live.bilibili.com/22580654 ## PPT 下载 -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 `20210909` 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20210909` 即可下载 PPT。 diff --git a/content/zh/live/webassembly.md b/content/zh/live/webassembly.md index ece082f38..6072d3326 100644 --- a/content/zh/live/webassembly.md +++ b/content/zh/live/webassembly.md @@ -33,4 +33,4 @@ Docker 创始人曾在推特上表示,如果WebAssembly 和 WASI 在2008年就 ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “PPT” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “PPT” 即可下载 PPT。 diff --git a/content/zh/live/webhook1202-live.md 
b/content/zh/live/webhook1202-live.md new file mode 100644 index 000000000..19b833f6a --- /dev/null +++ b/content/zh/live/webhook1202-live.md @@ -0,0 +1,43 @@ +--- +title: 浅谈 Webhook 开发与实践 +description: 本次分享将带大家了解 Webhook 原理、插件的开发过程以及使用方法,掌握 Admission Webhook 的相关知识。 +keywords: KubeSphere, Kubernetes, Webhook +css: scss/live-detail.scss + +section1: + snapshot: + videoUrl: //player.bilibili.com/player.html?aid=592116483&bvid=BV14q4y1z785&cid=453718387&page=1&high_quality=1 + type: iframe + time: 2021-12-02 20:00-21:00 + timeIcon: /images/live/clock.svg + base: 线上 + baseIcon: /images/live/base.svg +--- +## 分享内容简介 + +Webhook 的概念在互联网初期就被提出,在云原生时代 Webhook 依然作为 Admission 插件为 K8s 进行拓展。本次分享将带大家了解 Webhook 原理、插件的开发过程以及使用方法,掌握 Admission Webhook 的相关知识。 + +## 讲师简介 + +周杨,KubeSphere 后端研发助理工程师 + +个人简介: +周杨,目前就职于青云科技公司容器研发部,负责 KubeSphere 存储模块的功能开发以及相关 CSI 组件的维护与更新工作。 + + +## 分享大纲 + +![](https://pek3b.qingstor.com/kubesphere-community/images/webhook1202-live.png) + +## 直播时间 + +2021 年 12 月 02 日 20:00-21:00 + +## 直播地址 + +B 站 http://live.bilibili.com/22580654 + + +## PPT 下载 + +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211202` 即可下载 PPT。 \ No newline at end of file diff --git a/content/zh/live/yunda1014-live.md b/content/zh/live/yunda1014-live.md index 7f2101057..46744bc87 100644 --- a/content/zh/live/yunda1014-live.md +++ b/content/zh/live/yunda1014-live.md @@ -36,4 +36,6 @@ GitOps 提供了一种自动化的管理基础架构的方法。借助 GitOps, B 站 http://live.bilibili.com/22580654 +## PPT 下载 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 `20211014` 即可下载 PPT。 diff --git a/content/zh/live/zhongtong-shanghai.md b/content/zh/live/zhongtong-shanghai.md index 1bd3f787f..f3649c176 100644 --- a/content/zh/live/zhongtong-shanghai.md +++ b/content/zh/live/zhongtong-shanghai.md @@ -30,7 +30,7 @@ section1: ## 下载 PPT -可扫描官网底部二维码,关注 「KubeSphere 云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 +可扫描官网底部二维码,关注 「KubeSphere云原生」公众号,后台回复 “2021 上海” 即可下载 PPT。 diff --git a/content/zh/news/_index.md b/content/zh/news/_index.md index 1edfe0874..52d8a37ca 100644 --- 
a/content/zh/news/_index.md +++ b/content/zh/news/_index.md @@ -10,91 +10,135 @@ section1: section2: news: + - title: KubeSphere 3.2.0 GA:为 Kubernetes 管理带来面向 AI 的 GPU 调度和灵活的网关 + description: KubeSphere 是一个快速发展的开源社区,今天宣布正式发布 KubeSphere 3.2.0,这是综合 Kubernetes 管理平台的最新版本。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-stackfault.png + link: https://stackfault.net/t/topic/49486 + - title: KubeSphere 入驻 Azure Marketplace + description: 日前,面向云原生应用的容器混合云 KubeSphere 宣布正式入驻 Azure Marketplace,深度集成微软云容器服务 Azure AKS。 + image: /images/news/doit.jpg + link: https://www.doit.com.cn/p/470686.html + - title: 云原生值与不值的双重思考:好用才能成主流 + description: KubeSphere 是基于 K8s 内核的云原生操作系统,可以帮助企业节约成本,提高效率,最大限度把一个技术平滑传递到企业里的各个部门,屏蔽K8s碎片化。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-cctime.gif + link: http://www.cctime.com/html/2021-7-23/1582703.htm + - title: 我们为什么需要云原生?看完这一篇就够了 + description: 为了让云原生技术平稳落地,基于Kubernetes构建的面向云原生应用的容器混合云 KubeSphere 已经衍生出了一个庞大的家族。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-csdn.png + link: https://mp.weixin.qq.com/s/oMxkpR5d9jcc8eivI4wWkA + - title: 拥抱“云原生”的本质是什么? 
+ description: KubeSphere 经过三年的迭代,已经不是单一的产品,而是衍生成一个产品家族,同时社区成员也在扩大。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-cciv.png + link: http://www.ccw.com.cn/channel/cloudcomputing/2021-07-21/21116.html + - title: 超越 OpenShift,KubeSphere 要打造世界级开源容器平台 + description: 目前由国内企业青云科技打造的开源容器平台KubeSphere,已经成为仅次于 Rancher 和 OpenShift 的全球第三容器管理平台。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-techweb.jpeg + link: http://www.techweb.com.cn/it/2021-07-21/2849931.shtml + - title: 云原生时代到来,KubeSphere 容器平台有看点 + description: 今年五月,青云科技旗下自主研发的容器平台 KubeSphere 3.1.0 版本正式发布。 + image: /images/news/it168.jpg + link: http://cloud.it168.com/a2021/0714/6511/000006511655.shtml + - title: 青云科技主办 KubeSphere and Friends 2021:开源容器混合云,引领云原生 + description: 日前,由青云科技(qingcloud.com,股票代码:688316)旗下容器平台 KubeSphere 主办的“KubeSphere and Friends 2021”云原生 Meetup 线下沙龙首站在上海成功举办。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-vrsina.png + link: http://vr.sina.com.cn/2021-06-09/doc-ikqcfnca0086155.shtml + - title: KubeSphere 3.1.0 正式发布:混合云走向边缘,让应用无处不在 + description: 新版本主打“延伸至边缘侧的容器混合云”,通过集成 KubeEdge,拓展“边缘场景”,可帮助用户加速实现云边协同,在海量边、端设备上完成大规模应用的统一交付、运维与管控。 + image: /images/news/doit.jpg + link: https://www.doit.com.cn/p/442260.html + - title: KubeSphere 3.1.0 发布 主打“延伸至边缘侧的容器混合云” + description: 5 月 11 日消息,青云科技(qingcloud.com,股票代码:688316)旗下自主研发的容器平台 KubeSphere 3.1.0 版本正式发布,新版本主打“延伸至边缘侧的容器混合云”。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-techweb.jpeg + link: http://www.techweb.com.cn/it/2021-05-11/2839069.shtml + - title: KubeSphere 3.1.0 正式发布:混合云走向边缘,让应用无处不在 + description: 日前,青云科技旗下自主研发的容器平台 KubeSphere 3.1.0 版本正式发布。 + image: https://pek3b.qingstor.com/kubesphere-community/images/logo-hexuncaijing.png + link: http://tech.hexun.com/2021-05-11/203579390.html - title: KubeSphere 3.0,让混合云一步跨入云原生时代 - description: 混合云的先行者青云QingCloud发布了其容器平台的最新版本——面向云原生应用的容器混合云KubeSphere 3.0,让混合云一步跨入云原生时代。 + description: 混合云的先行者青云QingCloud 
发布了其容器平台的最新版本——面向云原生应用的容器混合云 KubeSphere 3.0,让混合云一步跨入云原生时代。 image: https://ap3.qingstor.com/kubesphere-website/docs/中国软件网.png link: 'http://cloud.soft6.com/202006/30/367336.html' - - title: 重磅!全开源的云原生“全家桶“KubeSphere 3.0来了! - description: KubeSphere三大重要特点是核心功能开箱即用、健壮的容器基础设施和企业增强特性,而此次KubeSphere 3.0版本最大的亮点便是多集群管理。 + - title: 重磅!全开源的云原生“全家桶“KubeSphere 3.0 来了! + description: KubeSphere 三大重要特点是核心功能开箱即用、健壮的容器基础设施和企业增强特性,而此次 KubeSphere 3.0 版本最大的亮点便是多集群管理。 image: https://ap3.qingstor.com/kubesphere-website/docs/中国经济网.jpg link: 'http://m.ce.cn/bwzg/202006/30/t20200630_35227640.shtml' - title: KubeSphere 3.0:打造开放架构,与合作伙伴共建云原生生态 - description: 青云QingCloud作为国内最早一批云平台研发和公有云服务提供商,在如何开发底层基础设施(存储、网络)以及企业常用的中间件、应用模板方面具有丰富经验。这些能力也会输出到KubeSphere,进而使得KubeSphere底层基础设施变得稳健和易用。 + description: 青云QingCloud 作为国内最早一批云平台研发和公有云服务提供商,在如何开发底层基础设施(存储、网络)以及企业常用的中间件、应用模板方面具有丰富经验。这些能力也会输出到KubeSphere,进而使得 KubeSphere 底层基础设施变得稳健和易用。 image: https://ap3.qingstor.com/kubesphere-website/docs/商业伙伴.png link: 'http://www.cnbp.net/news/detail/24087' - - title: 容器混合云时代已来,KubeSphere的差异化竞争力在哪? - description: 从极简易用切入市场,靠用户体验赢得口碑,并加速布局全球生态和开放架构,不断进化的KubeSphere将成为青云QingCloud发力容器混合云时代的利器。 + - title: 容器混合云时代已来,KubeSphere 的差异化竞争力在哪? 
+ description: 从极简易用切入市场,靠用户体验赢得口碑,并加速布局全球生态和开放架构,不断进化的 KubeSphere 将成为青云QingCloud 发力容器混合云时代的利器。 image: https://ap3.qingstor.com/kubesphere-website/docs/KubeSphere3.0发布.png link: 'https://mp.weixin.qq.com/s/6LbczexqB052dPlKeeD5ug' - - title: KubeSphere全新升级 3.0版聚焦全球化 - description: 在开源社区中,KubeSphere被称为云原生“全家桶”。一方面因为它解决了Kubernetes遗留下来的诸多琐碎问题,另一方面它延续青云QingCloud在产品设计上交互体验的优势,让其可以“开箱即用”。 + - title: KubeSphere全新升级 3.0 版聚焦全球化 + description: 在开源社区中,KubeSphere 被称为云原生“全家桶”。一方面因为它解决了Kubernetes遗留下来的诸多琐碎问题,另一方面它延续青云QingCloud 在产品设计上交互体验的优势,让其可以“开箱即用”。 image: https://ap3.qingstor.com/kubesphere-website/docs/光明网.png link: 'https://share.gmw.cn/tech/2020-07/02/content_33960132.htm' - - title: KubeSphere 3.0发布 多集群管理满足企业容器混合云需求 - description: KubeSphere 3.0提供的多集群管理不仅仅是管理底层的基础设施,还增加了两个应用层面的功能:一是部署应用时可以跨集群部署。二是应用全生命周期管理平台——OpenPitrix。 + - title: KubeSphere 3.0 发布 多集群管理满足企业容器混合云需求 + description: KubeSphere 3.0 提供的多集群管理不仅仅是管理底层的基础设施,还增加了两个应用层面的功能:一是部署应用时可以跨集群部署。二是应用全生命周期管理平台——OpenPitrix。 image: https://ap3.qingstor.com/kubesphere-website/docs/环球.jpeg link: 'https://tech.huanqiu.com/article/3yt3kl6DZ1k' - - title: 容器混合云时代即将到来,KubeSphere 3.0抢先布局 - description: KubeSphere 3.0作为一款以Kubernetes为基础,管理云原生应用的一种分布式操作系统,提供了一种可插拔式的开放架构,致力于解决混合云应用诉求,为企业迈向容器混合云时代架起了一座桥梁。 + - title: 容器混合云时代即将到来,KubeSphere 3.0 抢先布局 + description: KubeSphere 3.0 作为一款以 Kubernetes 为基础,管理云原生应用的一种分布式操作系统,提供了一种可插拔式的开放架构,致力于解决混合云应用诉求,为企业迈向容器混合云时代架起了一座桥梁。 image: https://ap3.qingstor.com/kubesphere-website/docs/比特网.png link: 'http://cloud.chinabyte.com/50/709938050.shtml' - - title: 云原生乘风破浪 KubeSphere倾心护航 + - title: 云原生乘风破浪 KubeSphere 倾心护航 description: 随着企业数字化转型的深入,业务转型成为企业转型的核心,为了让企业能更好的应对业务模式频繁的变化,云原生成为企业数字化转型的最佳选择。 image: https://ap3.qingstor.com/kubesphere-website/docs/ENI.jpg link: 'http://www.enicn.com/Enicn/2020/cloud_0701/49459.html' - - title: KubeSphere 3.0发布:支持多集群管理,构建容器混合云,生态友好是重点 - description: 
KubeSphere极大地降低了Kubernetes的入门门槛,是非常理想的Kubernetes新手村教官。它能大大降低Kubernetes的使用难度,用户可以对照着KubeSphere了解整个Kubernetes体系,了解Kubernetes的主要概念和特性。 + - title: KubeSphere 3.0 发布:支持多集群管理,构建容器混合云,生态友好是重点 + description: KubeSphere 极大地降低了 Kubernetes 的入门门槛,是非常理想的 Kubernetes 新手村教官。它能大大降低 Kubernetes 的使用难度,用户可以对照着 KubeSphere 了解整个 Kubernetes 体系,了解 Kubernetes 的主要概念和特性。 image: /images/news/doit.jpg link: 'https://www.doit.com.cn/p/372734.html' - - title: KubeSphere 3.0正式发布 容器混合云时代 以开放架构打造云原生应用生态 - description: KubeSphere是以Kubernetes为基础,管理云原生应用的一种分布式操作系统,致力于解决混合云时代的应用诉求。它提供可插拔的开放式架构,第三方应用可以无缝对接,让用户使用KubeSphere第三方应用也与KubeSphere原生应用一样快速平滑,能够让企业一步跨入云原生时代。 + - title: KubeSphere 3.0 正式发布 容器混合云时代 以开放架构打造云原生应用生态 + description: KubeSphere 是以 Kubernetes 为基础,管理云原生应用的一种分布式操作系统,致力于解决混合云时代的应用诉求。它提供可插拔的开放式架构,第三方应用可以无缝对接,让用户使用 KubeSphere 第三方应用也与 KubeSphere 原生应用一样快速平滑,能够让企业一步跨入云原生时代。 image: https://ap3.qingstor.com/kubesphere-website/docs/信息主管.png link: 'http://www.cio360.net/index.php?m=content&c=index&a=show&catid=603&id=100845' - - title: KubeSphere 3.0发布,满足对容器混合云的所有想象 - description: KubeSphere只提供最核心的服务,比如管理基础设施、工作负载,提供系统级别的服务等等,其他都交给Open Architecture。 + - title: KubeSphere 3.0 发布,满足对容器混合云的所有想象 + description: KubeSphere 只提供最核心的服务,比如管理基础设施、工作负载,提供系统级别的服务等等,其他都交给 Open Architecture。 image: https://ap3.qingstor.com/kubesphere-website/docs/168.png link: 'http://cloud.it168.com/a2020/0702/6240/000006240993.shtml' - - title: 从“开箱即用”到“多集群管理”,KubeSphere迭代速度惊人的硬实力 - description: KubeSphere除了多集群管理能力外,还提供了很多诸如强大的基础设施、全栈容器云以及企业级特性增强等其他功能的真正原因,帮助企业一步跨入云原生时代。 + - title: 从“开箱即用”到“多集群管理”,KubeSphere 迭代速度惊人的硬实力 + description: KubeSphere 除了多集群管理能力外,还提供了很多诸如强大的基础设施、全栈容器云以及企业级特性增强等其他功能的真正原因,帮助企业一步跨入云原生时代。 image: https://ap3.qingstor.com/kubesphere-website/docs/中国科技网.png link: 'http://www.stdaily.com/zhuanti01/dsj/2020-06/30/content_967587.shtml' - - title: KubeSphere 3.0来了 多集群管理亮了 - description: 在混合云时代,KubeSphere多集群管理就是围绕Kubernetes的必备能力和用户的急切需求所打造的。所以,KubeSphere 3.0目标就是针对大量存在的异构混合云提供一个中央控制面板,不管是从运维角度还是知识技能的接受程度,都极大地降低了用户的成本。 + 
- title: KubeSphere 3.0 来了 多集群管理亮了 + description: 在混合云时代,KubeSphere 多集群管理就是围绕 Kubernetes 的必备能力和用户的急切需求所打造的。所以,KubeSphere 3.0 目标就是针对大量存在的异构混合云提供一个中央控制面板,不管是从运维角度还是知识技能的接受程度,都极大地降低了用户的成本。 image: https://ap3.qingstor.com/kubesphere-website/docs/赛迪网.png link: 'http://www.ccidnet.com/2020/0701/10532121.shtml' - - title: 如何构建面向云原生应用的容器混合云?KubeSphere 3.0给出答案! - description: 作为青云QingCloud旗下面向云原生应用的容器平台,KubeSphere解决的就是混合云成为一种常态化带来的应用层面上的诉求,KubeSphere能够让企业快速迈入云原生时代。 + - title: 如何构建面向云原生应用的容器混合云?KubeSphere 3.0 给出答案! + description: 作为青云QingCloud 旗下面向云原生应用的容器平台,KubeSphere 解决的就是混合云成为一种常态化带来的应用层面上的诉求,KubeSphere 能够让企业快速迈入云原生时代。 image: https://ap3.qingstor.com/kubesphere-website/docs/至顶网.jpg link: 'http://server.zhiding.cn/server/2020/0701/3127434.shtml' - title: KubeSphere 3.0:敞开胸怀、海纳百川 - description: 升级后的KubeSphere 3.0将为企业的数字化转型提供更可靠的支撑,也将为企业生产带来更高效的惊喜,让企业将更多的精力从基础设施中解放出来,投放在核心应用上。企业也将由此进入到发展快轨,平步青云。 + description: 升级后的 KubeSphere 3.0 将为企业的数字化转型提供更可靠的支撑,也将为企业生产带来更高效的惊喜,让企业将更多的精力从基础设施中解放出来,投放在核心应用上。企业也将由此进入到发展快轨,平步青云。 image: https://ap3.qingstor.com/kubesphere-website/docs/数据猿.png link: 'https://mp.weixin.qq.com/s/2SlllfKoKiaU7zL6T5vYrg' - - title: 容器混合云来了,KubeSphere 3.0宣战 + - title: 容器混合云来了,KubeSphere 3.0 宣战 description: 混合云正在发生变化,随着云计算的普及,企业为了避免云厂商锁定,采用多个云厂商的服务或者产品。于是,新的问题产生了,企业如何在异构平台上无缝管理、部署和迁移其应用。以前混合云是面向资源的,现在面向应用了。 image: https://ap3.qingstor.com/kubesphere-website/docs/中国电子报.jpg link: 'http://www.cena.com.cn/cloudservice/20200630/107542.html' - title: KubeSphere 3.0全新升级 打造全球生态影响力 - description: 过去半年,KubeSphere海外用户量已经超过国内,KubeSphere 3.0发布后,也将进一步推动KubeSphere业务的全球影响力。 + description: 过去半年,KubeSphere 海外用户量已经超过国内,KubeSphere 3.0 发布后,也将进一步推动 KubeSphere 业务的全球影响力。 image: https://ap3.qingstor.com/kubesphere-website/docs/techweb.jpeg link: 'http://www.techweb.com.cn/article/2020-06-30/2795600.shtml' - - title: 多集群管理之后, KubeSphere的高光时刻来了 - description: KubeSphere提供了一个用户体验非常优秀的控制台,用户可以用非常低的成本去学习容器、云原生应用和K8s,几乎不需要任何成本地使用上面的应用,如DevOps、微服务治理、应用分发,多集群管理等。 + - title: 多集群管理之后, KubeSphere 
的高光时刻来了 + description: KubeSphere 提供了一个用户体验非常优秀的控制台,用户可以用非常低的成本去学习容器、云原生应用和 K8s,几乎不需要任何成本地使用上面的应用,如 DevOps、微服务治理、应用分发,多集群管理等。 image: https://ap3.qingstor.com/kubesphere-website/docs/51cto.png link: 'https://cloud.51cto.com/art/202006/619927.htm' - - title: KubeSphere研发总监周小四:在谈容器同质化之前,你得先理解工匠精神 + - title: KubeSphere 研发总监周小四:在谈容器同质化之前,你得先理解工匠精神 description: 日语中有个说法:“一生悬命”,意思是指不遗余力去做一件值得耗尽一生去追求的事情,如今常被用来诠释匠人精神的内涵。在技术的世界,追求“匠心”同样被人们所尊重和推崇。 image: https://ap3.qingstor.com/kubesphere-website/docs/KubeSphere工匠.jpg link: 'https://mp.weixin.qq.com/s/MDDV8MVuCL9SGxvXYVZQJQ' - - title: 从混合云到云原生 KubeSphere 3.0先把书读厚,再把书读薄 - description: 青云QingCloud通过将容器、云原生相关技术“读厚”,打造KubeSphere,才能帮助企业把云原生技术“读薄”。 + - title: 从混合云到云原生 KubeSphere 3.0 先把书读厚,再把书读薄 + description: 青云 QingCloud 通过将容器、云原生相关技术“读厚”,打造 KubeSphere,才能帮助企业把云原生技术“读薄”。 image: https://ap3.qingstor.com/kubesphere-website/docs/KubeSphere3.0发布.png link: 'https://mp.weixin.qq.com/s/rS0HwG7oqzgBJNM05XKV5g' - title: 谷歌开源的K8S太“难”用?青云推KubeSphere抢占容器市场 - description: 青云QingCloud一边模仿Red Hat,一边想要吃掉单一生态的容器创业公司,云计算未来的竞争之路应该怎么走? + description: 青云QingCloud 一边模仿 Red Hat,一边想要吃掉单一生态的容器创业公司,云计算未来的竞争之路应该怎么走? image: /images/news/tmtpost.jpg link: 'https://www.tmtpost.com/3908673.html' - - title: 一把利剑,青云容器新品KubeSphere之六大核心功能 + - title: 一把利剑,青云容器新品 KubeSphere 之六大核心功能 description: 一个基于 Kubernetes(K8S) 基础构建的企业级分布式多租户容器管理平台,是青云在今年Cloud Insight大会上推出7大年度新品的核心产品,它有哪些功能亮点? image: /images/news/doit.jpg link: 'https://www.doit.com.cn/p/311804.html' @@ -102,16 +146,16 @@ section2: description: 近日,KubeSphere 容器平台高级版 2.0 正式发布并加入开源组织 CNCF,这是否足以解决 Kubernetes 存在的诸多问题? image: /images/news/info.jpg link: 'https://www.infoq.cn/article/zcDT2RY1h2dksVld-CZU' - - title: 为什么说KubeSphere容器平台是云原生时代的“集大成者”? - description: 从定义来看,云原生意味着企业的应用程序要在云中,而不是在传统的数据中心。而CNCF(Cloud Native Computing Foundation 云原生计算基金会)则认为,使用开源软件堆栈进行容器化,这才是真正的云原生 + - title: 为什么说 KubeSphere 容器平台是云原生时代的“集大成者”? 
+ description: 从定义来看,云原生意味着企业的应用程序要在云中,而不是在传统的数据中心。而 CNCF(Cloud Native Computing Foundation 云原生计算基金会)则认为,使用开源软件堆栈进行容器化,这才是真正的云原生。 image: /images/news/it168.jpg link: 'http://cloud.it168.com/a2019/0428/5169/000005169586.shtml' - - title: 专访青云KubeSphere容器团队:我们为何不放过这个新赛道? - description: Kubernetes这一容器工具的发起者是远在美国山景城的谷歌,但眼下,这一技术正跨越太平洋,被中国的青云QingCloud做了更多优化升级。那么,其背后的团队是经过怎样曲折的过程才打磨成功这款产品?经历了多少故事? + - title: 专访青云 KubeSphere 容器团队:我们为何不放过这个新赛道? + description: Kubernetes 这一容器工具的发起者是远在美国山景城的谷歌,但眼下,这一技术正跨越太平洋,被中国的青云QingCloud 做了更多优化升级。那么,其背后的团队是经过怎样曲折的过程才打磨成功这款产品?经历了多少故事? image: /images/news/leifeng.jpg link: 'https://www.leiphone.com/news/201906/IgfYixoS86T6uxWA.html' - - title: 三问KubeSphere 容器平台有何过人之处? - description: 数字孪生、数字化、万物互联&5G、AI、区块链……层出不穷的新技术迎面而来,数字化转型也从1.0进入到了2.0时代,如何将生产、销售、运营等所有环节赋予数字力量,经由业务全面满足顾客所需,构建未来竞争优势,青云QingCloud深谙此道。 + - title: 三问 KubeSphere 容器平台有何过人之处? + description: 数字孪生、数字化、万物互联&5G、AI、区块链……层出不穷的新技术迎面而来,数字化转型也从 1.0 进入到了 2.0 时代,如何将生产、销售、运营等所有环节赋予数字力量,经由业务全面满足顾客所需,构建未来竞争优势,青云QingCloud 深谙此道。 image: /images/news/ccid.jpg link: 'http://news.ccidnet.com/2019/0419/10470510.shtml' ---- \ No newline at end of file +--- diff --git a/content/zh/privacy/_index.md b/content/zh/privacy/_index.md index 35aefb475..642765013 100644 --- a/content/zh/privacy/_index.md +++ b/content/zh/privacy/_index.md @@ -61,7 +61,7 @@ css: "scss/private.scss" For the purpose of the GDPR, Service Providers are considered Data Processors.

  • -

    Third-party Social Media Service refers to any website or any social network website through which a User can log in or create an account to use the Service.

    +

    Third-party Social Media Service refers to any website or any social network website through which a User can log in or create a user to use the Service.

  • Usage Data refers to data collected automatically, either generated by the use of the Service or from the Service infrastructure itself (for example, the duration of a page visit).

    diff --git a/data/zh/footer.yaml b/data/zh/footer.yaml index c27129755..63e1b9786 100644 --- a/data/zh/footer.yaml +++ b/data/zh/footer.yaml @@ -81,4 +81,4 @@ footer: - content: 技术支持服务 link: 'https://kubesphere.cloud/ticket/' - content: 了解商业版与咨询合作 - link: 'https://jinshuju.net/f/tPUldX' + link: 'https://jinshuju.net/f/C8uB8k' diff --git a/data/zh/video.json b/data/zh/video.json index f7cf7b4dd..7f3c31a93 100644 --- a/data/zh/video.json +++ b/data/zh/video.json @@ -340,7 +340,7 @@ "link": "https://kubesphere-docs.pek3b.qingstor.com/video/Demo-Install-KubeSphere-on-OpenShift.mp4", "createTime": "2020.01.20", "snapshot": "https://pek3b.qingstor.com/kubesphere-docs/png/20200308224101.png", - "group": "Webniar", + "group": "Webinar", "tag": "容器" }, { diff --git a/layouts/case/single.html b/layouts/case/single.html index 717698bc2..856517b5f 100644 --- a/layouts/case/single.html +++ b/layouts/case/single.html @@ -27,43 +27,48 @@
      {{ range .listLeft}} -
    • - {{ if eq .type 1}} -
        - {{ range .contentList }} -
      • -

        {{ .content }}

        -
      • - {{ end }} -
      - {{ else if eq .type 2}} -
      -

      {{ .content }}

      -

      {{ .author }}

      - -
      - {{ else }} -

      {{ .title }}

      - {{ range .contentList }} - {{ if .specialContent }} - {{ with .specialContent }} - {{ if .link }} - {{ partial "case_specialContent.html" . }} - {{ else if .level }} - {{ partial "case_specialContent.html" . }} - {{ else }} -

      {{ .text }}

      +
    • + {{ if eq .type 1}} +
        + {{ range .contentList }} +
      • +

        {{ .content }}

        +
      • + {{ end }} +
      + {{ else if eq .type 2}} +
      +

      {{ .content }}

      +

      {{ .author }}

      + +
      + {{ else }} +

      {{ .title }}

      + {{ range $item,$index := .contentList }} + {{ if isset $index "specialContent" }} + {{ with $index.specialContent }} + {{ if .link }} + {{ partial "case_specialContent.html" . }} + {{ else if .level }} + {{ partial "case_specialContent.html" . }} + {{ else }} +

      {{ .text }}

      + {{ end }} + {{ end }} + {{ end }} + {{ if isset $index "content" }} + {{ with $index.content }} +

      {{ . }}

      + {{ end }} + {{ end }} + {{ end }} + {{ if isset . "image" }} + {{ with .image }} + {{ end }} {{ end }} - {{else}} -

      {{ .content }}

      {{ end }} - {{ end }} - {{ if .image }} - - {{ end }} - {{ end }} -
    • + {{ end }}
    diff --git a/layouts/conferences/list.html b/layouts/conferences/list.html index d4c6fef9c..5b1af1176 100644 --- a/layouts/conferences/list.html +++ b/layouts/conferences/list.html @@ -7,7 +7,7 @@ {{ $viewDetail := .Params.viewDetail }} {{ range .Params.list }}
  • -
    +
    {{ .name }}

    {{ .name }}

    {{ .content }}

    diff --git a/layouts/docs/single.html b/layouts/docs/single.html index 367324ea4..bb11381fc 100644 --- a/layouts/docs/single.html +++ b/layouts/docs/single.html @@ -137,12 +137,11 @@ - {{if eq .Site.LanguageCode "en-US"}} - {{if .Params.showSubscribe }} +

    - Sign up for latest tutorials and Kubernetes tips + {{ i18n "Receive the latest news, articles and updates from KubeSphere" }}

    - {{end}} - {{end}} +

    {{ i18n "Msg-Thank" }}

    diff --git a/layouts/learn/list.html b/layouts/learn/list.html index d4c3423c6..df18b239e 100644 --- a/layouts/learn/list.html +++ b/layouts/learn/list.html @@ -22,7 +22,7 @@
    - + {{ end }} @@ -77,8 +77,8 @@

    {{ .Params.section6.title }}

      - {{ range .Page.Sections }} -
    • + {{ range sort .Page.Sections "Weight" }} +
    • {{ .LinkTitle }}

      @@ -90,7 +90,7 @@ - {{ range .Page.Sections }} + {{ range sort .Page.Sections "Weight" }} @@ -106,10 +106,20 @@ {{ end }} +
      {{ .LinkTitle }} {{ .Params.profit }}
    • - {{ end }} + {{ end }}
    @@ -129,7 +139,7 @@ {{ i18n "Apply for the job" }}
  • - {{ end }} + {{ end }}
    {{ i18n "View more jobs" }} @@ -195,7 +205,6 @@ var topActive = function() { var topHeight = $('.section-2').outerHeight() top.find('a').each(function() { var id = $(this).attr('href') - console.log(id) var h = $(id) var elementToTop = getElementTopToScreenTop(h) if (elementToTop < headerHeight + topHeight + 10) { @@ -211,8 +220,38 @@ var bindScrollEvent = function() { topActive() }) } + +var tableInit = function () { + const tableLi = $('.tableLi') + tableLi.each((index,item)=>{ + const eachLi = $(item) + const allTrLine = eachLi.find('table tbody') + dealWithTableBody(eachLi[0], allTrLine) + }) +} +var dealWithTableBody = function (line, tbody) { + const positionLine = $(line) + const originBody = $(tbody).clone() + const button = positionLine.find('button') + if (tbody.children().length < 6) { + button.addClass('hideButton') + } else { + const allTR = tbody.children().slice(0, 5) + tbody.empty().append(allTR) + } + button.on('click', function () { + button.toggleClass('active') + if (tbody.children().length > 5) { + const allTR = tbody.children().slice(0, 5) + tbody.empty().append(allTR) + } else { + tbody.empty().append(originBody.clone().children()) + } + }) +} bindScrollEvent() initScrollByHash() bindClickTopLink() +tableInit() {{ end }} \ No newline at end of file diff --git a/layouts/learn/single.html b/layouts/learn/single.html index ec3f82d9e..57c160dea 100644 --- a/layouts/learn/single.html +++ b/layouts/learn/single.html @@ -25,57 +25,55 @@
    {{ end }} -
    +
    {{ $relPermalink := .RelPermalink }} {{ with .Site.GetPage "/learn" }} - {{ range .Sections }} + {{ range .Sections }} +
    +
    + {{ .LinkTitle }} + +
    + +
    + {{ end }} + {{ end }}
    - {{ end }} \ No newline at end of file diff --git a/layouts/live/list.html b/layouts/live/list.html index 6f1239179..e71ec8629 100644 --- a/layouts/live/list.html +++ b/layouts/live/list.html @@ -2,23 +2,23 @@ {{ with .Params.section1 }}
    -
    -

    {{ .title }}

    -
    +
    +

    {{ .title }}

    +
    {{ end }} {{ with .Params.section2 }}
    -
    - -
    +
    + +
    {{ with .notice }}

    {{ .title }}

    @@ -36,8 +36,8 @@
    {{ .tag }}
    {{ end }} -
    -
    +
    +
    {{ end }} @@ -63,52 +63,64 @@ {{ end }} - {{ $overImg := .Params.section4.overImg}} {{ $noticeImg := .Params.section4.noticeImg}} {{ with .Params.section4 }} -
    -
    - {{ range .list }} -
    -
    - {{$over := (time .lastTime).Unix}} - {{ if ge now.Unix $over}} - - {{ else }} - +
    +
    +
    {{ .title }}
    +
    +
      + {{ range .list }} + {{ with . }} +
    • {{ .year }}
    • {{ end }} -
      -

      {{ .date }}

      -

      {{ .time }}

      + {{ end }} +
    +
    + {{ range $index,$data := .list }} + {{ with . }} + + {{ end }} + {{ end }} +
    +
    +
    +
    +
    +
    -

    {{ .title }}

    - {{ if ge now.Unix $over}} - - {{ else }} - - {{ end }}
    - {{ end }}
    + {{ end }} + {{ with .Params.section5 }}
    @@ -125,15 +137,15 @@ {{ end }} \ No newline at end of file diff --git a/layouts/partials/content.html b/layouts/partials/content.html index 2fecd61f2..7f6174422 100644 --- a/layouts/partials/content.html +++ b/layouts/partials/content.html @@ -59,7 +59,6 @@
    - {{ if eq $LanguageCode "en-US"}}
    close @@ -77,7 +76,6 @@
    - {{end}} {{ partial "footer.html" $context }} {{ $aside := resources.Get "js/aside.js" }} {{ $asideJS := $aside | resources.Fingerprint "sha512" }} diff --git a/layouts/partials/css.html b/layouts/partials/css.html index dbb01f9e5..1ac5dd487 100644 --- a/layouts/partials/css.html +++ b/layouts/partials/css.html @@ -2,6 +2,7 @@ + {{ $common := resources.Get "scss/common.scss" | toCSS | minify | fingerprint }} diff --git a/layouts/partials/head.html b/layouts/partials/head.html index 162107a16..c56c79e85 100644 --- a/layouts/partials/head.html +++ b/layouts/partials/head.html @@ -25,11 +25,25 @@ + {{ if .IsDescendant (.GetPage "/docs") }} {{ end }} + +{{ if .Site.Params.addBaiduAnalytics }} + +{{ end }} + {{ if .Site.Params.addGoogleAnalytics }} diff --git a/layouts/partials/header.html b/layouts/partials/header.html index 6a6693c10..369e3139f 100644 --- a/layouts/partials/header.html +++ b/layouts/partials/header.html @@ -3,7 +3,7 @@ {{ if eq .Site.Language.Lang "zh"}}
    - 🚀KubeSphere 3.1.1 全新发布!混合多云走向边缘,让应用无处不在,查看 v3.1.x 详细解读与视频👩‍💻 + 🚀 KubeSphere 3.2.1 已发布!多项功能优化,带来更好的用户体验,点击查看 v3.2.1 发行记录👩‍💻 close
    @@ -11,7 +11,7 @@ {{ if eq .Site.Language.Lang "en"}}
    - 🚀 KubeSphere 3.1.1 is now available as it extends Kubernetes from the cloud to the edge. Read the introduction for 3.1.x → + 🚀 KubeSphere 3.2.1 was released on Dec 20, it brought enhancements and better user experience. Read the release notes for 3.2.1 → close
    diff --git a/localization_style_guides/KubeSphere Localization Style Guide (for Simplified Chinese).md b/localization_style_guides/KubeSphere Localization Style Guide (for Simplified Chinese).md index 2de11e0a4..00d7bded5 100644 --- a/localization_style_guides/KubeSphere Localization Style Guide (for Simplified Chinese).md +++ b/localization_style_guides/KubeSphere Localization Style Guide (for Simplified Chinese).md @@ -18,7 +18,7 @@ Make sure the term you use is the correct one in UI, or it may confuse readers w | Source Text | Correct Target Text | Wrong Target Text | | -------------------------------------------------------- | ------------------------------------ | -------------------------------- | -| Click **Components** on the **Cluster Management** page. | 在**集群管理**页面点击**服务组件**。 | 在**集群管理**页面点击**组件**。 | +| Click **System Components** on the **Cluster Management** page. | 在**集群管理**页面点击**系统组件**。 | 在**集群管理**页面点击**系统组件**。 | ## Format diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/00-federation-control-plane.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/00-federation-control-plane.png new file mode 100644 index 000000000..36f16db15 Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/00-federation-control-plane.png differ diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/01-cluster-management.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/01-cluster-management.png new file mode 100644 index 000000000..d93c03f4a Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/01-cluster-management.png differ diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/02-add-cluster.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/02-add-cluster.png new file mode 100644 index 000000000..3e43ca322 Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/02-add-cluster.png 
differ diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/create-stateless-service-png.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/create-stateless-service-png.png new file mode 100644 index 000000000..c7da1b3c5 Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/create-stateless-service-png.png differ diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/multi-tenant-support.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/multi-tenant-support.png new file mode 100644 index 000000000..72a78390d Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/multi-tenant-support.png differ diff --git a/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/view-status.png b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/view-status.png new file mode 100644 index 000000000..7c478f855 Binary files /dev/null and b/static/images/blogs/en/Kubernetes-multicluster-KubeSphere/view-status.png differ diff --git a/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/add-log-receiver.png b/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/add-log-receiver.png new file mode 100644 index 000000000..353453522 Binary files /dev/null and b/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/add-log-receiver.png differ diff --git a/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/kubesphere snapshot.png b/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/kubesphere snapshot.png new file mode 100644 index 000000000..b8538272b Binary files /dev/null and b/static/images/blogs/en/Serverless-way-for-Kubernetes-Log-Alerting/kubesphere snapshot.png differ diff --git a/static/images/blogs/en/kubekey-containerd/kubernetes-containerd-banner.png b/static/images/blogs/en/kubekey-containerd/kubernetes-containerd-banner.png new file mode 100644 index 000000000..9ddea3830 Binary 
files /dev/null and b/static/images/blogs/en/kubekey-containerd/kubernetes-containerd-banner.png differ diff --git a/static/images/blogs/en/okta/oidc.png b/static/images/blogs/en/okta/oidc.png new file mode 100644 index 000000000..f0aeab6c3 Binary files /dev/null and b/static/images/blogs/en/okta/oidc.png differ diff --git a/static/images/blogs/en/okta/step-4.png b/static/images/blogs/en/okta/step-4.png new file mode 100644 index 000000000..ab8496c05 Binary files /dev/null and b/static/images/blogs/en/okta/step-4.png differ diff --git a/static/images/blogs/en/okta/step2.png b/static/images/blogs/en/okta/step2.png new file mode 100644 index 000000000..8edbf2483 Binary files /dev/null and b/static/images/blogs/en/okta/step2.png differ diff --git a/static/images/blogs/en/okta/step3-1.png b/static/images/blogs/en/okta/step3-1.png new file mode 100644 index 000000000..85d088054 Binary files /dev/null and b/static/images/blogs/en/okta/step3-1.png differ diff --git a/static/images/blogs/en/okta/step3-2.png b/static/images/blogs/en/okta/step3-2.png new file mode 100644 index 000000000..bdb6bdbd0 Binary files /dev/null and b/static/images/blogs/en/okta/step3-2.png differ diff --git a/static/images/blogs/en/okta/step3-3.png b/static/images/blogs/en/okta/step3-3.png new file mode 100644 index 000000000..2f8d7f690 Binary files /dev/null and b/static/images/blogs/en/okta/step3-3.png differ diff --git a/static/images/blogs/en/okta/step3-4.png b/static/images/blogs/en/okta/step3-4.png new file mode 100644 index 000000000..96cfaf66e Binary files /dev/null and b/static/images/blogs/en/okta/step3-4.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/00-GPU-scheduling-quota-manage.png b/static/images/blogs/en/release-announcement3.2.0/00-GPU-scheduling-quota-manage.png new file mode 100644 index 000000000..7449beec1 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/00-GPU-scheduling-quota-manage.png differ diff --git 
a/static/images/blogs/en/release-announcement3.2.0/01-Grafana-dashboard.png b/static/images/blogs/en/release-announcement3.2.0/01-Grafana-dashboard.png new file mode 100644 index 000000000..42b63bef2 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/01-Grafana-dashboard.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/02-GPU-overview.png b/static/images/blogs/en/release-announcement3.2.0/02-GPU-overview.png new file mode 100644 index 000000000..0300b45a4 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/02-GPU-overview.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/03-platform-settings.png b/static/images/blogs/en/release-announcement3.2.0/03-platform-settings.png new file mode 100644 index 000000000..f7ad28b6a Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/03-platform-settings.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/04-cluster-manage.png b/static/images/blogs/en/release-announcement3.2.0/04-cluster-manage.png new file mode 100644 index 000000000..720f738b3 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/04-cluster-manage.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/05-federated-deployment.png b/static/images/blogs/en/release-announcement3.2.0/05-federated-deployment.png new file mode 100644 index 000000000..1f4c4fafd Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/05-federated-deployment.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/06-view-federation.png b/static/images/blogs/en/release-announcement3.2.0/06-view-federation.png new file mode 100644 index 000000000..f0f35d588 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/06-view-federation.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/07-volume-manage.png 
b/static/images/blogs/en/release-announcement3.2.0/07-volume-manage.png new file mode 100644 index 000000000..a03e69ca4 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/07-volume-manage.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/08-storage-class-settings.png b/static/images/blogs/en/release-announcement3.2.0/08-storage-class-settings.png new file mode 100644 index 000000000..5dc7bfc4f Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/08-storage-class-settings.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/09-volumes.png b/static/images/blogs/en/release-announcement3.2.0/09-volumes.png new file mode 100644 index 000000000..a56c26c03 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/09-volumes.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/10-gateway-settings.png b/static/images/blogs/en/release-announcement3.2.0/10-gateway-settings.png new file mode 100644 index 000000000..c6bf40269 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/10-gateway-settings.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/11-gateway-settings2.png b/static/images/blogs/en/release-announcement3.2.0/11-gateway-settings2.png new file mode 100644 index 000000000..72b605a90 Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/11-gateway-settings2.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/12-app-store.png b/static/images/blogs/en/release-announcement3.2.0/12-app-store.png new file mode 100644 index 000000000..32d2f60dc Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/12-app-store.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/3.2.0GA.png b/static/images/blogs/en/release-announcement3.2.0/3.2.0GA.png new file mode 100644 index 000000000..b3d34fcaa Binary files /dev/null 
and b/static/images/blogs/en/release-announcement3.2.0/3.2.0GA.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/v3.2.0-GA-cover.png b/static/images/blogs/en/release-announcement3.2.0/v3.2.0-GA-cover.png new file mode 100644 index 000000000..09a29904e Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/v3.2.0-GA-cover.png differ diff --git a/static/images/blogs/en/release-announcement3.2.0/v3.2.0-contributors.png b/static/images/blogs/en/release-announcement3.2.0/v3.2.0-contributors.png new file mode 100644 index 000000000..6badbb87a Binary files /dev/null and b/static/images/blogs/en/release-announcement3.2.0/v3.2.0-contributors.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/00-access-control.png b/static/images/blogs/en/x509-certificate-exporter/00-access-control.png new file mode 100644 index 000000000..f55c4bbf6 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/00-access-control.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/01-create-user.png b/static/images/blogs/en/x509-certificate-exporter/01-create-user.png new file mode 100644 index 000000000..d065b04d5 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/01-create-user.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/02-three-users.png b/static/images/blogs/en/x509-certificate-exporter/02-three-users.png new file mode 100644 index 000000000..fa18f8e9c Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/02-three-users.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/03-ws-manager.png b/static/images/blogs/en/x509-certificate-exporter/03-ws-manager.png new file mode 100644 index 000000000..90842d66b Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/03-ws-manager.png differ diff --git 
a/static/images/blogs/en/x509-certificate-exporter/04-create-workspace.png b/static/images/blogs/en/x509-certificate-exporter/04-create-workspace.png new file mode 100644 index 000000000..d67d77d95 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/04-create-workspace.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/05-invite-member.png b/static/images/blogs/en/x509-certificate-exporter/05-invite-member.png new file mode 100644 index 000000000..764b22118 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/05-invite-member.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/06-assign-role.png b/static/images/blogs/en/x509-certificate-exporter/06-assign-role.png new file mode 100644 index 000000000..746d28503 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/06-assign-role.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/07-create-project.png b/static/images/blogs/en/x509-certificate-exporter/07-create-project.png new file mode 100644 index 000000000..bd513f652 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/07-create-project.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/08-enter-project-name.png b/static/images/blogs/en/x509-certificate-exporter/08-enter-project-name.png new file mode 100644 index 000000000..14fc2b82e Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/08-enter-project-name.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/09-view-project-info.png b/static/images/blogs/en/x509-certificate-exporter/09-view-project-info.png new file mode 100644 index 000000000..6f9b0fe30 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/09-view-project-info.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/10-invite-project-member.png 
b/static/images/blogs/en/x509-certificate-exporter/10-invite-project-member.png new file mode 100644 index 000000000..c5e44f9ae Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/10-invite-project-member.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/11-assign-project-role.png b/static/images/blogs/en/x509-certificate-exporter/11-assign-project-role.png new file mode 100644 index 000000000..139af85e0 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/11-assign-project-role.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/12-add-repo.png b/static/images/blogs/en/x509-certificate-exporter/12-add-repo.png new file mode 100644 index 000000000..617cfebba Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/12-add-repo.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/13-add-repo2.png b/static/images/blogs/en/x509-certificate-exporter/13-add-repo2.png new file mode 100644 index 000000000..3d2665a8a Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/13-add-repo2.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/14-view-repo.png b/static/images/blogs/en/x509-certificate-exporter/14-view-repo.png new file mode 100644 index 000000000..8ef08db65 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/14-view-repo.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/15-create-app.png b/static/images/blogs/en/x509-certificate-exporter/15-create-app.png new file mode 100644 index 000000000..325125aba Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/15-create-app.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/16-create-app2.png b/static/images/blogs/en/x509-certificate-exporter/16-create-app2.png new file mode 100644 index 000000000..d333ec031 Binary files /dev/null and 
b/static/images/blogs/en/x509-certificate-exporter/16-create-app2.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/17-select-enix.png b/static/images/blogs/en/x509-certificate-exporter/17-select-enix.png new file mode 100644 index 000000000..063a88246 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/17-select-enix.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/18-select-x509.png b/static/images/blogs/en/x509-certificate-exporter/18-select-x509.png new file mode 100644 index 000000000..0bb8a0ff5 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/18-select-x509.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/19-deploy-x590.png b/static/images/blogs/en/x509-certificate-exporter/19-deploy-x590.png new file mode 100644 index 000000000..71c304de9 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/19-deploy-x590.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/20-set-app-name.png b/static/images/blogs/en/x509-certificate-exporter/20-set-app-name.png new file mode 100644 index 000000000..cba784a88 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/20-set-app-name.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/21-app-settings.png b/static/images/blogs/en/x509-certificate-exporter/21-app-settings.png new file mode 100644 index 000000000..b51b10e44 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/21-app-settings.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/22-explain-parameters.png b/static/images/blogs/en/x509-certificate-exporter/22-explain-parameters.png new file mode 100644 index 000000000..4690a504b Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/22-explain-parameters.png differ diff --git 
a/static/images/blogs/en/x509-certificate-exporter/23-view-created-app.png b/static/images/blogs/en/x509-certificate-exporter/23-view-created-app.png new file mode 100644 index 000000000..ad586e744 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/23-view-created-app.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/24-monitoring-alerting.png b/static/images/blogs/en/x509-certificate-exporter/24-monitoring-alerting.png new file mode 100644 index 000000000..1021b8c90 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/24-monitoring-alerting.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/25-create-alerting-policy.png b/static/images/blogs/en/x509-certificate-exporter/25-create-alerting-policy.png new file mode 100644 index 000000000..2ccd88d1c Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/25-create-alerting-policy.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/26-custom-rule.png b/static/images/blogs/en/x509-certificate-exporter/26-custom-rule.png new file mode 100644 index 000000000..2f705417b Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/26-custom-rule.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/27-message-settings.png b/static/images/blogs/en/x509-certificate-exporter/27-message-settings.png new file mode 100644 index 000000000..d36838e86 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/27-message-settings.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/28-view-created-alerting-rule.png b/static/images/blogs/en/x509-certificate-exporter/28-view-created-alerting-rule.png new file mode 100644 index 000000000..74224ec10 Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/28-view-created-alerting-rule.png differ diff --git 
a/static/images/blogs/en/x509-certificate-exporter/29-built-in-alerting-policy.png b/static/images/blogs/en/x509-certificate-exporter/29-built-in-alerting-policy.png new file mode 100644 index 000000000..6fdb9752d Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/29-built-in-alerting-policy.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/30-view-alerting-rule.png b/static/images/blogs/en/x509-certificate-exporter/30-view-alerting-rule.png new file mode 100644 index 000000000..5cb14d45e Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/30-view-alerting-rule.png differ diff --git a/static/images/blogs/en/x509-certificate-exporter/x509-certificate-exporter-cover-image.png b/static/images/blogs/en/x509-certificate-exporter/x509-certificate-exporter-cover-image.png new file mode 100644 index 000000000..742037a5e Binary files /dev/null and b/static/images/blogs/en/x509-certificate-exporter/x509-certificate-exporter-cover-image.png differ diff --git a/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/create-route.png b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/create-route.png new file mode 100644 index 000000000..da50bf5df Binary files /dev/null and b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/create-route.png differ diff --git a/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/deployment-list.png b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/deployment-list.png new file mode 100644 index 000000000..13bf4b543 Binary files /dev/null and b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/deployment-list.png differ diff --git a/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/routing-rules.png b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/routing-rules.png new file mode 100644 index 000000000..a46f72e59 
Binary files /dev/null and b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/routing-rules.png differ diff --git a/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/snapshot.png b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/snapshot.png new file mode 100644 index 000000000..7df5adf2e Binary files /dev/null and b/static/images/blogs/how-to-use-kubernetes-project-gateways-and-routes/snapshot.png differ diff --git a/static/images/blogs/log4j/log4j.jpeg b/static/images/blogs/log4j/log4j.jpeg new file mode 100644 index 000000000..f7b269820 Binary files /dev/null and b/static/images/blogs/log4j/log4j.jpeg differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/00-enable-gateway.png b/static/images/blogs/transform-traditional-applications-into-microservices/00-enable-gateway.png new file mode 100644 index 000000000..5b496315e Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/00-enable-gateway.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/01-create-mysql.png b/static/images/blogs/transform-traditional-applications-into-microservices/01-create-mysql.png new file mode 100644 index 000000000..b49e3b549 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/01-create-mysql.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/02-create-mysql.png b/static/images/blogs/transform-traditional-applications-into-microservices/02-create-mysql.png new file mode 100644 index 000000000..32089d6b4 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/02-create-mysql.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/03-set-password.png 
b/static/images/blogs/transform-traditional-applications-into-microservices/03-set-password.png new file mode 100644 index 000000000..5f03c9eb5 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/03-set-password.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/04-create-wp.png b/static/images/blogs/transform-traditional-applications-into-microservices/04-create-wp.png new file mode 100644 index 000000000..49615b3d9 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/04-create-wp.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/05-create-wp.png b/static/images/blogs/transform-traditional-applications-into-microservices/05-create-wp.png new file mode 100644 index 000000000..73b720713 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/05-create-wp.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/06-create-wp.png b/static/images/blogs/transform-traditional-applications-into-microservices/06-create-wp.png new file mode 100644 index 000000000..fc530b00c Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/06-create-wp.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/07-nodeport.png b/static/images/blogs/transform-traditional-applications-into-microservices/07-nodeport.png new file mode 100644 index 000000000..a307506ad Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/07-nodeport.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/08-check-app.png b/static/images/blogs/transform-traditional-applications-into-microservices/08-check-app.png new file mode 100644 index 
000000000..51b1cade6 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/08-check-app.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/09-expose-port.png b/static/images/blogs/transform-traditional-applications-into-microservices/09-expose-port.png new file mode 100644 index 000000000..f9d7b0622 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/09-expose-port.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/10-traffic-topology.png b/static/images/blogs/transform-traditional-applications-into-microservices/10-traffic-topology.png new file mode 100644 index 000000000..76524e5a7 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/10-traffic-topology.png differ diff --git a/static/images/blogs/transform-traditional-applications-into-microservices/traffic-monitoring-cover.png b/static/images/blogs/transform-traditional-applications-into-microservices/traffic-monitoring-cover.png new file mode 100644 index 000000000..715d12876 Binary files /dev/null and b/static/images/blogs/transform-traditional-applications-into-microservices/traffic-monitoring-cover.png differ diff --git a/static/images/case/ZTO/ZTO1.png b/static/images/case/ZTO/ZTO1.png new file mode 100644 index 000000000..7f02b6161 Binary files /dev/null and b/static/images/case/ZTO/ZTO1.png differ diff --git a/static/images/case/ZTO/ZTO2.jpeg b/static/images/case/ZTO/ZTO2.jpeg new file mode 100644 index 000000000..bc3fde333 Binary files /dev/null and b/static/images/case/ZTO/ZTO2.jpeg differ diff --git a/static/images/case/ZTO/ZTO3.jpeg b/static/images/case/ZTO/ZTO3.jpeg new file mode 100644 index 000000000..fe1751630 Binary files /dev/null and b/static/images/case/ZTO/ZTO3.jpeg differ diff --git a/static/images/case/ZTO/ZTO4.jpeg b/static/images/case/ZTO/ZTO4.jpeg 
new file mode 100644 index 000000000..22c188cdb Binary files /dev/null and b/static/images/case/ZTO/ZTO4.jpeg differ diff --git a/static/images/case/ZTO/ZTO5.jpeg b/static/images/case/ZTO/ZTO5.jpeg new file mode 100644 index 000000000..4578b5e9a Binary files /dev/null and b/static/images/case/ZTO/ZTO5.jpeg differ diff --git a/static/images/case/ZTO/ZTO6.jpg b/static/images/case/ZTO/ZTO6.jpg new file mode 100644 index 000000000..4f5d28300 Binary files /dev/null and b/static/images/case/ZTO/ZTO6.jpg differ diff --git a/static/images/case/logo-msxf.png b/static/images/case/logo-msxf.png new file mode 100644 index 000000000..c983ad092 Binary files /dev/null and b/static/images/case/logo-msxf.png differ diff --git a/static/images/case/logo-qunar.png b/static/images/case/logo-qunar.png new file mode 100644 index 000000000..ace49dfab Binary files /dev/null and b/static/images/case/logo-qunar.png differ diff --git a/static/images/case/segmentfault-logo.png b/static/images/case/segmentfault-logo.png new file mode 100644 index 000000000..2eec6f416 Binary files /dev/null and b/static/images/case/segmentfault-logo.png differ diff --git a/static/images/case/uisee.png b/static/images/case/uisee.png new file mode 100644 index 000000000..d74ed5a13 Binary files /dev/null and b/static/images/case/uisee.png differ diff --git a/static/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/kubesphere-login-page.png b/static/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/kubesphere-login-page.png deleted file mode 100644 index b75c20853..000000000 Binary files a/static/images/docs/access-control-and-account-management/external-authentication/use-an-oauth2-identity-provider/kubesphere-login-page.png and /dev/null differ diff --git a/static/images/docs/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png 
b/static/images/docs/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png deleted file mode 100644 index c237c96c1..000000000 Binary files a/static/images/docs/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/account-ready.png b/static/images/docs/appstore/application-lifecycle-management/account-ready.png deleted file mode 100644 index bdd3812b9..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/account-ready.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/activate-app.png b/static/images/docs/appstore/application-lifecycle-management/activate-app.png deleted file mode 100644 index 2a57e4152..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/activate-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/activate-version.png b/static/images/docs/appstore/application-lifecycle-management/activate-version.png deleted file mode 100644 index 323bb3d61..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/activate-version.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-active.png b/static/images/docs/appstore/application-lifecycle-management/app-active.png deleted file mode 100644 index 24f628c8f..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-active.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-category-1.jpg b/static/images/docs/appstore/application-lifecycle-management/app-category-1.jpg deleted file mode 100644 index a83af3c19..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-category-1.jpg and /dev/null differ diff --git 
a/static/images/docs/appstore/application-lifecycle-management/app-category.png b/static/images/docs/appstore/application-lifecycle-management/app-category.png deleted file mode 100644 index c1b0ebb2c..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-category.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-deploy.jpg b/static/images/docs/appstore/application-lifecycle-management/app-deploy.jpg deleted file mode 100644 index 86134102d..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-deploy.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-draft.png b/static/images/docs/appstore/application-lifecycle-management/app-draft.png deleted file mode 100644 index 80409f9c1..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-draft.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-in-category-list-expected.png b/static/images/docs/appstore/application-lifecycle-management/app-in-category-list-expected.png deleted file mode 100644 index 4b40cc1ef..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-in-category-list-expected.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-processing.jpg b/static/images/docs/appstore/application-lifecycle-management/app-processing.jpg deleted file mode 100644 index ceb599f75..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-processing.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-review-name.png b/static/images/docs/appstore/application-lifecycle-management/app-review-name.png deleted file mode 100644 index ce482f90b..000000000 Binary files 
a/static/images/docs/appstore/application-lifecycle-management/app-review-name.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-store.png b/static/images/docs/appstore/application-lifecycle-management/app-store.png deleted file mode 100644 index 96bca7218..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-templates-page.png b/static/images/docs/appstore/application-lifecycle-management/app-templates-page.png deleted file mode 100644 index 4f940ab66..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-templates-page.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-to-be-reviewed.png b/static/images/docs/appstore/application-lifecycle-management/app-to-be-reviewed.png deleted file mode 100644 index 0a7abb683..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-to-be-reviewed.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/app-to-be-upgraded.png b/static/images/docs/appstore/application-lifecycle-management/app-to-be-upgraded.png deleted file mode 100644 index e4563e5a5..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/app-to-be-upgraded.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/assign-category.jpg b/static/images/docs/appstore/application-lifecycle-management/assign-category.jpg deleted file mode 100644 index 2caa13af5..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/assign-category.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/confirm-category.jpg 
b/static/images/docs/appstore/application-lifecycle-management/confirm-category.jpg deleted file mode 100644 index e81ed9705..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/confirm-category.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/create-new-version.jpg b/static/images/docs/appstore/application-lifecycle-management/create-new-version.jpg deleted file mode 100644 index 21b7a6876..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/create-new-version.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/create-review-role.png b/static/images/docs/appstore/application-lifecycle-management/create-review-role.png deleted file mode 100644 index ec3f7a733..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/create-review-role.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/create-role.png b/static/images/docs/appstore/application-lifecycle-management/create-role.png deleted file mode 100644 index 2cfaa042f..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/create-role.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/create-roles.png b/static/images/docs/appstore/application-lifecycle-management/create-roles.png deleted file mode 100644 index 8a3505adf..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/create-roles.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/deploy-redis.png b/static/images/docs/appstore/application-lifecycle-management/deploy-redis.png deleted file mode 100644 index aaba2a064..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/deploy-redis.png and /dev/null differ diff --git 
a/static/images/docs/appstore/application-lifecycle-management/deployed-instance-success.png b/static/images/docs/appstore/application-lifecycle-management/deployed-instance-success.png deleted file mode 100644 index 3552ef5d0..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/deployed-instance-success.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/deploying-app.png b/static/images/docs/appstore/application-lifecycle-management/deploying-app.png deleted file mode 100644 index 883231d9f..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/deploying-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/deployment-place.png b/static/images/docs/appstore/application-lifecycle-management/deployment-place.png deleted file mode 100644 index d27ae819e..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/deployment-place.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/edit-app-information.png b/static/images/docs/appstore/application-lifecycle-management/edit-app-information.png deleted file mode 100644 index 5e5ee0858..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/edit-app-information.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/edit-app-template.png b/static/images/docs/appstore/application-lifecycle-management/edit-app-template.png deleted file mode 100644 index 6e3ecd9af..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/edit-app-template.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/edit-template.png b/static/images/docs/appstore/application-lifecycle-management/edit-template.png deleted file mode 100644 index 526324f6c..000000000 Binary 
files a/static/images/docs/appstore/application-lifecycle-management/edit-template.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/new-version-draft.jpg b/static/images/docs/appstore/application-lifecycle-management/new-version-draft.jpg deleted file mode 100644 index f664f77c1..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/new-version-draft.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/new-version-redis.png b/static/images/docs/appstore/application-lifecycle-management/new-version-redis.png deleted file mode 100644 index a25558db4..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/new-version-redis.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/redis.png b/static/images/docs/appstore/application-lifecycle-management/redis.png deleted file mode 100644 index 039759a9b..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/redis.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/release-to-store.jpg b/static/images/docs/appstore/application-lifecycle-management/release-to-store.jpg deleted file mode 100644 index e2e3d5aa6..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/release-to-store.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/remove-app.png b/static/images/docs/appstore/application-lifecycle-management/remove-app.png deleted file mode 100644 index 393aaaca2..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/remove-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/reviewing.png b/static/images/docs/appstore/application-lifecycle-management/reviewing.png deleted file mode 100644 
index 4440c1a55..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/reviewing.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/see-new-version.png b/static/images/docs/appstore/application-lifecycle-management/see-new-version.png deleted file mode 100644 index 12725ffb1..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/see-new-version.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/set-app-type.png b/static/images/docs/appstore/application-lifecycle-management/set-app-type.png deleted file mode 100644 index 64ba296fa..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/set-app-type.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/set-category-for-app.png b/static/images/docs/appstore/application-lifecycle-management/set-category-for-app.png deleted file mode 100644 index cfda70c41..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/set-category-for-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/submit-for-review.png b/static/images/docs/appstore/application-lifecycle-management/submit-for-review.png deleted file mode 100644 index 4d7d005bf..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/submit-for-review.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/submitted-app.png b/static/images/docs/appstore/application-lifecycle-management/submitted-app.png deleted file mode 100644 index 281a00781..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/submitted-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/suspend-app.png 
b/static/images/docs/appstore/application-lifecycle-management/suspend-app.png deleted file mode 100644 index 56dcd40b7..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/suspend-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/suspend-version.png b/static/images/docs/appstore/application-lifecycle-management/suspend-version.png deleted file mode 100644 index fbd9873c9..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/suspend-version.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/template-list-one-app.jpg b/static/images/docs/appstore/application-lifecycle-management/template-list-one-app.jpg deleted file mode 100644 index fe5b4d43c..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/template-list-one-app.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/test-deployment.png b/static/images/docs/appstore/application-lifecycle-management/test-deployment.png deleted file mode 100644 index 1c5739319..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/test-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/test-new-version.jpg b/static/images/docs/appstore/application-lifecycle-management/test-new-version.jpg deleted file mode 100644 index 183765243..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/test-new-version.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upgrade-an-app.jpg b/static/images/docs/appstore/application-lifecycle-management/upgrade-an-app.jpg deleted file mode 100644 index 1eabb34a7..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upgrade-an-app.jpg and /dev/null differ diff 
--git a/static/images/docs/appstore/application-lifecycle-management/upgrade-app.png b/static/images/docs/appstore/application-lifecycle-management/upgrade-app.png deleted file mode 100644 index 5e518a598..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upgrade-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upgrade-finish.png b/static/images/docs/appstore/application-lifecycle-management/upgrade-finish.png deleted file mode 100644 index 7f9c076f4..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upgrade-finish.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upload-app.png b/static/images/docs/appstore/application-lifecycle-management/upload-app.png deleted file mode 100644 index 24f7191d1..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upload-app.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upload-icon.png b/static/images/docs/appstore/application-lifecycle-management/upload-icon.png deleted file mode 100644 index 1bb4166e8..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upload-icon.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upload-new-redis-version.png b/static/images/docs/appstore/application-lifecycle-management/upload-new-redis-version.png deleted file mode 100644 index aea148c9a..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upload-new-redis-version.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upload-new-version.jpg b/static/images/docs/appstore/application-lifecycle-management/upload-new-version.jpg deleted file mode 100644 index 4e1c2426b..000000000 Binary files 
a/static/images/docs/appstore/application-lifecycle-management/upload-new-version.jpg and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/upload-template.png b/static/images/docs/appstore/application-lifecycle-management/upload-template.png deleted file mode 100644 index 54f22d18b..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/upload-template.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/uploaded-new-version.png b/static/images/docs/appstore/application-lifecycle-management/uploaded-new-version.png deleted file mode 100644 index c15e22051..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/uploaded-new-version.png and /dev/null differ diff --git a/static/images/docs/appstore/application-lifecycle-management/version-upgraded.png b/static/images/docs/appstore/application-lifecycle-management/version-upgraded.png deleted file mode 100644 index 57213c221..000000000 Binary files a/static/images/docs/appstore/application-lifecycle-management/version-upgraded.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/Terminal icon.png b/static/images/docs/appstore/built-in-apps/etcd-app/Terminal icon.png new file mode 100644 index 000000000..bbdf4063e Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/etcd-app/Terminal icon.png differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/deploy-etcd.png b/static/images/docs/appstore/built-in-apps/etcd-app/deploy-etcd.png deleted file mode 100644 index 1b9c817fa..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/deploy-etcd.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/deployment-location.png b/static/images/docs/appstore/built-in-apps/etcd-app/deployment-location.png deleted file mode 100644 index 2ed95c2a6..000000000 Binary 
files a/static/images/docs/appstore/built-in-apps/etcd-app/deployment-location.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-app-store.png b/static/images/docs/appstore/built-in-apps/etcd-app/etcd-app-store.png deleted file mode 100644 index 05cd049cd..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-command.png b/static/images/docs/appstore/built-in-apps/etcd-app/etcd-command.png deleted file mode 100644 index 148ac4d2a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-command.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-running.png b/static/images/docs/appstore/built-in-apps/etcd-app/etcd-running.png deleted file mode 100644 index 7b8c203dd..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-statefulset.png b/static/images/docs/appstore/built-in-apps/etcd-app/etcd-statefulset.png deleted file mode 100644 index b0bf703f3..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-statefulset.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-terminal.png b/static/images/docs/appstore/built-in-apps/etcd-app/etcd-terminal.png deleted file mode 100644 index 52d00bced..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/etcd-terminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/etcd-app/project-overview.png b/static/images/docs/appstore/built-in-apps/etcd-app/project-overview.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/project-overview.png and /dev/null differ diff --git 
a/static/images/docs/appstore/built-in-apps/etcd-app/specify-volume.png b/static/images/docs/appstore/built-in-apps/etcd-app/specify-volume.png deleted file mode 100644 index 6db49d2bf..000000000 Binary files a/static/images/docs/appstore/built-in-apps/etcd-app/specify-volume.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/app-store.png b/static/images/docs/appstore/built-in-apps/harbor-app/app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/click-deploy.png b/static/images/docs/appstore/built-in-apps/harbor-app/click-deploy.png deleted file mode 100644 index 79a0c5991..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/click-deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/creating-harbor.png b/static/images/docs/appstore/built-in-apps/harbor-app/creating-harbor.png deleted file mode 100644 index 0c1d20e12..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/creating-harbor.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/deploy-harbor.png b/static/images/docs/appstore/built-in-apps/harbor-app/deploy-harbor.png deleted file mode 100644 index cb2c61736..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/deploy-harbor.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/find-harbor.png b/static/images/docs/appstore/built-in-apps/harbor-app/find-harbor.png deleted file mode 100644 index 7c8f93157..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/find-harbor.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/harbor-app/harbor-config.png 
b/static/images/docs/appstore/built-in-apps/harbor-app/harbor-config.png deleted file mode 100644 index c2dddda15..000000000 Binary files a/static/images/docs/appstore/built-in-apps/harbor-app/harbor-config.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/deploying-memcached.png b/static/images/docs/appstore/built-in-apps/memcached-app/deploying-memcached.png deleted file mode 100644 index fe5c9216c..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/deploying-memcached.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/deployment-confirm.png b/static/images/docs/appstore/built-in-apps/memcached-app/deployment-confirm.png deleted file mode 100644 index d0bec16d0..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/deployment-confirm.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/edit-config.png b/static/images/docs/appstore/built-in-apps/memcached-app/edit-config.png deleted file mode 100644 index 27d85e90e..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/edit-config.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/in-app-store.png b/static/images/docs/appstore/built-in-apps/memcached-app/in-app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-app-store.png b/static/images/docs/appstore/built-in-apps/memcached-app/memcached-app-store.png deleted file mode 100644 index 3e60df9cc..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-port-pod.png 
b/static/images/docs/appstore/built-in-apps/memcached-app/memcached-port-pod.png deleted file mode 100644 index 58c22337e..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-port-pod.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-running.png b/static/images/docs/appstore/built-in-apps/memcached-app/memcached-running.png deleted file mode 100644 index 306191da3..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-service.png b/static/images/docs/appstore/built-in-apps/memcached-app/memcached-service.png deleted file mode 100644 index 8501ce72a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/memcached-app/memcached-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/config-file.png b/static/images/docs/appstore/built-in-apps/minio-app/config-file.png deleted file mode 100644 index ef8c48166..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/config-file.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/deloy-minio-2.png b/static/images/docs/appstore/built-in-apps/minio-app/deloy-minio-2.png deleted file mode 100644 index 22a9132e4..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/deloy-minio-2.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/deploy-minio.png b/static/images/docs/appstore/built-in-apps/minio-app/deploy-minio.png deleted file mode 100644 index cf89a2d97..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/deploy-minio.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/edit-internet-access.png 
b/static/images/docs/appstore/built-in-apps/minio-app/edit-internet-access.png deleted file mode 100644 index 787de8711..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/minio-app.png b/static/images/docs/appstore/built-in-apps/minio-app/minio-app.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/minio-app.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/minio-deploy.png b/static/images/docs/appstore/built-in-apps/minio-app/minio-deploy.png deleted file mode 100644 index d2e46eb73..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/minio-deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/minio-detail.png b/static/images/docs/appstore/built-in-apps/minio-app/minio-detail.png deleted file mode 100644 index 874cdd736..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/minio-detail.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/minio-in-app-store.png b/static/images/docs/appstore/built-in-apps/minio-app/minio-in-app-store.png deleted file mode 100644 index 5c2c97d82..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/minio-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/minio-in-list.png b/static/images/docs/appstore/built-in-apps/minio-app/minio-in-list.png deleted file mode 100644 index 468432407..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/minio-in-list.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/nodeport.png b/static/images/docs/appstore/built-in-apps/minio-app/nodeport.png deleted file mode 100644 index 
b76697fe9..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/port-exposed.png b/static/images/docs/appstore/built-in-apps/minio-app/port-exposed.png deleted file mode 100644 index 7b9e5a41c..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/port-exposed.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/minio-app/template-list.png b/static/images/docs/appstore/built-in-apps/minio-app/template-list.png deleted file mode 100644 index 06c491b83..000000000 Binary files a/static/images/docs/appstore/built-in-apps/minio-app/template-list.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/app-store.png b/static/images/docs/appstore/built-in-apps/mongodb-app/app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/confirm-deployment.png b/static/images/docs/appstore/built-in-apps/mongodb-app/confirm-deployment.png deleted file mode 100644 index 502c03166..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/deploy-mongodb.png b/static/images/docs/appstore/built-in-apps/mongodb-app/deploy-mongodb.png deleted file mode 100644 index 1dc453585..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/deploy-mongodb.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-in-app-store.png b/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-in-app-store.png deleted file mode 100644 index 08d2421cd..000000000 Binary files 
a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-running.png b/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-running.png deleted file mode 100644 index 9773af944..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-service.png b/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-service.png deleted file mode 100644 index ce6dcc1da..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-terminal.png b/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-terminal.png deleted file mode 100644 index e09fd835c..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/mongodb-terminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mongodb-app/set-app-configuration.png b/static/images/docs/appstore/built-in-apps/mongodb-app/set-app-configuration.png deleted file mode 100644 index 49c9f3dff..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mongodb-app/set-app-configuration.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/click-deploy.png b/static/images/docs/appstore/built-in-apps/mysql-app/click-deploy.png deleted file mode 100644 index f6ff9d860..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/click-deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/deploy-mysql.png b/static/images/docs/appstore/built-in-apps/mysql-app/deploy-mysql.png deleted file mode 100644 index 45f72cda6..000000000 Binary files 
a/static/images/docs/appstore/built-in-apps/mysql-app/deploy-mysql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/edit-internet-access.png b/static/images/docs/appstore/built-in-apps/mysql-app/edit-internet-access.png deleted file mode 100644 index b7f2fd57a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/find-mysql.png b/static/images/docs/appstore/built-in-apps/mysql-app/find-mysql.png deleted file mode 100644 index 14a2bca46..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/find-mysql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/go-to-app-store.png b/static/images/docs/appstore/built-in-apps/mysql-app/go-to-app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/go-to-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-port-number.png b/static/images/docs/appstore/built-in-apps/mysql-app/mysql-port-number.png deleted file mode 100644 index 62bc14076..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-port-number.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-running.png b/static/images/docs/appstore/built-in-apps/mysql-app/mysql-running.png deleted file mode 100644 index 38a02089f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-service.png b/static/images/docs/appstore/built-in-apps/mysql-app/mysql-service.png deleted file mode 100644 index 8a93707c0..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-service.png and /dev/null differ 
diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-teminal.png b/static/images/docs/appstore/built-in-apps/mysql-app/mysql-teminal.png deleted file mode 100644 index 7ccef541b..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-teminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-workload.png b/static/images/docs/appstore/built-in-apps/mysql-app/mysql-workload.png deleted file mode 100644 index c2a8b108e..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/mysql-workload.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/nodeport-mysql.png b/static/images/docs/appstore/built-in-apps/mysql-app/nodeport-mysql.png deleted file mode 100644 index f3107a74d..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/nodeport-mysql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/mysql-app/uncomment-password.png b/static/images/docs/appstore/built-in-apps/mysql-app/uncomment-password.png deleted file mode 100644 index e33c3de6f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/mysql-app/uncomment-password.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/app-store.png b/static/images/docs/appstore/built-in-apps/nginx-app/app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/confirm-deployment.png b/static/images/docs/appstore/built-in-apps/nginx-app/confirm-deployment.png deleted file mode 100644 index b62a20f92..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/deploy-nginx.png 
b/static/images/docs/appstore/built-in-apps/nginx-app/deploy-nginx.png deleted file mode 100644 index 9481d6259..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/deploy-nginx.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/edit-config-nginx.png b/static/images/docs/appstore/built-in-apps/nginx-app/edit-config-nginx.png deleted file mode 100644 index c4ba24e09..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/edit-config-nginx.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/edit-internet-access.png b/static/images/docs/appstore/built-in-apps/nginx-app/edit-internet-access.png deleted file mode 100644 index beca8446a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/exposed-port.png b/static/images/docs/appstore/built-in-apps/nginx-app/exposed-port.png deleted file mode 100644 index 07a18556c..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/exposed-port.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/manifest-file.png b/static/images/docs/appstore/built-in-apps/nginx-app/manifest-file.png deleted file mode 100644 index c23e530ef..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/manifest-file.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-in-app-store.png b/static/images/docs/appstore/built-in-apps/nginx-app/nginx-in-app-store.png deleted file mode 100644 index 066d88680..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-running.png b/static/images/docs/appstore/built-in-apps/nginx-app/nginx-running.png 
deleted file mode 100644 index ab8a606fc..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-service.png b/static/images/docs/appstore/built-in-apps/nginx-app/nginx-service.png deleted file mode 100644 index 26657a463..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/nginx-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/nginx-app/nodeport.png b/static/images/docs/appstore/built-in-apps/nginx-app/nodeport.png deleted file mode 100644 index ce9e2fd79..000000000 Binary files a/static/images/docs/appstore/built-in-apps/nginx-app/nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/access-postgresql.png b/static/images/docs/appstore/built-in-apps/postgresql-app/access-postgresql.png deleted file mode 100644 index c9e595830..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/access-postgresql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/click-app-store.png b/static/images/docs/appstore/built-in-apps/postgresql-app/click-app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/click-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/container-terminal.png b/static/images/docs/appstore/built-in-apps/postgresql-app/container-terminal.png deleted file mode 100644 index 258e36151..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/container-terminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png b/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png deleted file mode 100644 index 
4c0327480..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql.png b/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql.png deleted file mode 100644 index 0c1c6b975..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/deploy-postgresql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/edit-internet-access.png b/static/images/docs/appstore/built-in-apps/postgresql-app/edit-internet-access.png deleted file mode 100644 index bc853cd93..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/nodeport.png b/static/images/docs/appstore/built-in-apps/postgresql-app/nodeport.png deleted file mode 100644 index d39885ec6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/port-number.png b/static/images/docs/appstore/built-in-apps/postgresql-app/port-number.png deleted file mode 100644 index eb78f307e..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/port-number.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png b/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png deleted file mode 100644 index b844027b2..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-ready.png b/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-ready.png deleted 
file mode 100644 index 980500795..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/postgresql-ready.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/postgresql-app/set-config.png b/static/images/docs/appstore/built-in-apps/postgresql-app/set-config.png deleted file mode 100644 index 618b21f34..000000000 Binary files a/static/images/docs/appstore/built-in-apps/postgresql-app/set-config.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png deleted file mode 100644 index b83e969a6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png deleted file mode 100644 index 1b580fe7a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png deleted file mode 100644 index 67366ce5d..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png deleted file mode 100644 index a464e97bc..000000000 Binary files 
a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png deleted file mode 100644 index a7f67e123..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png deleted file mode 100644 index 8e9c31d47..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png deleted file mode 100644 index b68faa689..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png deleted file mode 100644 index 06c321f4d..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png deleted file mode 100644 index d39885ec6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png deleted file mode 100644 index 479999db9..000000000 Binary files a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png and /dev/null differ diff --git 
a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png deleted file mode 100644 index 69d5eba49..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png deleted file mode 100644 index b04f25948..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png deleted file mode 100644 index 6eb995df1..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png deleted file mode 100644 index 4f5ceb87b..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png deleted file mode 100644 index 403a65f18..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png deleted file 
mode 100644 index 2c0502d2d..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png b/static/images/docs/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png deleted file mode 100644 index 6578004ad..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png deleted file mode 100644 index b618867e6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png deleted file mode 100644 index 9185cb471..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/pods-running.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/pods-running.png deleted file mode 100644 index e1e998604..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/pods-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/project-overview.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/project-overview.png deleted file mode 100644 index 3fe9f1111..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/project-overview.png and /dev/null differ diff --git 
a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png deleted file mode 100644 index 96279efb6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png deleted file mode 100644 index 839ac0b97..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png deleted file mode 100644 index bc90f9979..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png deleted file mode 100644 index 82d01b5dc..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png deleted file mode 100644 index 506a9787e..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png and /dev/null differ diff --git 
a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png deleted file mode 100644 index ff121d744..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png deleted file mode 100644 index 03f4c67af..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volume-status.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volume-status.png deleted file mode 100644 index fec7ef1f8..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volume-status.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volumes.png b/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volumes.png deleted file mode 100644 index 67c8b57a0..000000000 Binary files a/static/images/docs/appstore/built-in-apps/radondb-postgresql-app/volumes.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/access-redis.png b/static/images/docs/appstore/built-in-apps/redis-app/access-redis.png deleted file mode 100644 index 1d36db386..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/access-redis.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/app-store.png b/static/images/docs/appstore/built-in-apps/redis-app/app-store.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files 
a/static/images/docs/appstore/built-in-apps/redis-app/app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/configure-redis.png b/static/images/docs/appstore/built-in-apps/redis-app/configure-redis.png deleted file mode 100644 index 08f06b26a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/configure-redis.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/confirm-deployment.png b/static/images/docs/appstore/built-in-apps/redis-app/confirm-deployment.png deleted file mode 100644 index 91f7cfd9f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/deploy-redis.png b/static/images/docs/appstore/built-in-apps/redis-app/deploy-redis.png deleted file mode 100644 index 588c2f76a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/deploy-redis.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/redis-in-app-store.png b/static/images/docs/appstore/built-in-apps/redis-app/redis-in-app-store.png deleted file mode 100644 index 402dbb16a..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/redis-in-app-store.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/redis-running.png b/static/images/docs/appstore/built-in-apps/redis-app/redis-running.png deleted file mode 100644 index 5bba077d9..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/redis-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/redis-app/redis-terminal.png b/static/images/docs/appstore/built-in-apps/redis-app/redis-terminal.png deleted file mode 100644 index 8b8fe3856..000000000 Binary files a/static/images/docs/appstore/built-in-apps/redis-app/redis-terminal.png and /dev/null 
differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.png b/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.png deleted file mode 100644 index 7dc8bc272..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.png b/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.png deleted file mode 100644 index e4b8d251f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.png b/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.png deleted file mode 100644 index 4fe72c02f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.png b/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.png deleted file mode 100644 index 0bcc6bf84..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.png b/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.png deleted file mode 100644 index 1dc13e985..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.png b/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.png deleted file mode 100644 index 18a7c9c1b..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.png and /dev/null differ diff --git 
a/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.png b/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.png deleted file mode 100644 index ef05d82a6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.png b/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.png deleted file mode 100644 index d39885ec6..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.png b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.png deleted file mode 100644 index 37efb2b7f..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.png b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.png deleted file mode 100644 index f386ef5f7..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.png and /dev/null differ diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.png b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.png deleted file mode 100644 index 0bf5583ba..000000000 Binary files a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/add-clickhouse.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/add-clickhouse.png deleted file mode 100644 index cce86eda5..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/add-clickhouse.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/add-repo.png 
b/static/images/docs/appstore/external-apps/deploy-clickhouse/add-repo.png deleted file mode 100644 index 53586bbdf..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/add-repo.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/app-running.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/app-running.png deleted file mode 100644 index 043fe2047..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/app-running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/basic-info.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/basic-info.png deleted file mode 100644 index 4d62fdf30..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/basic-info.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/change-nodeport.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/change-nodeport.png deleted file mode 100644 index 188f6fe22..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/change-nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/chart-tab.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/chart-tab.png deleted file mode 100644 index fb356eea8..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/chart-tab.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png deleted file mode 100644 index d15dc555f..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png and /dev/null differ diff --git 
a/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy.png deleted file mode 100644 index df4f35e0d..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/click-deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png deleted file mode 100644 index f4032cc44..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-service.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-service.png deleted file mode 100644 index 339a940f8..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/clickhouse-service.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/from-app-templates.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/from-app-templates.png deleted file mode 100644 index 98d8c685a..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/from-app-templates.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/pods-running.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/pods-running.png deleted file mode 100644 index c7f006ab5..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/pods-running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/project-overview.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/project-overview.png deleted file mode 100644 index dcc175626..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-clickhouse/project-overview.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/repo-added.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/repo-added.png deleted file mode 100644 index be53babd8..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/repo-added.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png deleted file mode 100644 index d10dfaec6..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulsets-running.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulsets-running.png deleted file mode 100644 index 456295856..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/volume-status.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/volume-status.png deleted file mode 100644 index f8bda6ea6..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/volume-status.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-clickhouse/volumes.png b/static/images/docs/appstore/external-apps/deploy-clickhouse/volumes.png deleted file mode 100644 index a66e64fff..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-clickhouse/volumes.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/add-main_repo.png b/static/images/docs/appstore/external-apps/deploy-gitlab/add-main_repo.png deleted file mode 100644 
index fe3745007..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/add-main_repo.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/add_repo.png b/static/images/docs/appstore/external-apps/deploy-gitlab/add_repo.png deleted file mode 100644 index 354236e13..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/add_repo.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/added-main_repo.png b/static/images/docs/appstore/external-apps/deploy-gitlab/added-main_repo.png deleted file mode 100644 index 3b256ce77..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/added-main_repo.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/basic_info.png b/static/images/docs/appstore/external-apps/deploy-gitlab/basic_info.png deleted file mode 100644 index 731f22853..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/basic_info.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/change_value.png b/static/images/docs/appstore/external-apps/deploy-gitlab/change_value.png deleted file mode 100644 index b8c5e97bc..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/change_value.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/click_gitlab.png b/static/images/docs/appstore/external-apps/deploy-gitlab/click_gitlab.png deleted file mode 100644 index f63bfac8b..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/click_gitlab.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/deploy_app.png b/static/images/docs/appstore/external-apps/deploy-gitlab/deploy_app.png deleted file mode 100644 index ccdafa1ba..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-gitlab/deploy_app.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/deployments_running.png b/static/images/docs/appstore/external-apps/deploy-gitlab/deployments_running.png deleted file mode 100644 index 47add29f1..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/deployments_running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/from-app_templates.png b/static/images/docs/appstore/external-apps/deploy-gitlab/from-app_templates.png deleted file mode 100644 index b86a7dd7e..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/from-app_templates.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/gitlab_running.png b/static/images/docs/appstore/external-apps/deploy-gitlab/gitlab_running.png deleted file mode 100644 index f71aaadc0..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/gitlab_running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/initial_password.png b/static/images/docs/appstore/external-apps/deploy-gitlab/initial_password.png deleted file mode 100644 index 6faa5c457..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/initial_password.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/search_secret.png b/static/images/docs/appstore/external-apps/deploy-gitlab/search_secret.png deleted file mode 100644 index 46bbaa4ec..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/search_secret.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/search_service.png b/static/images/docs/appstore/external-apps/deploy-gitlab/search_service.png deleted file mode 100644 index d691d9bbe..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-gitlab/search_service.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/statefulsets_running.png b/static/images/docs/appstore/external-apps/deploy-gitlab/statefulsets_running.png deleted file mode 100644 index a6d7cfc63..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/statefulsets_running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-gitlab/view_config.png b/static/images/docs/appstore/external-apps/deploy-gitlab/view_config.png deleted file mode 100644 index b14c459ca..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-gitlab/view_config.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/imported-successfully.png b/static/images/docs/appstore/external-apps/deploy-litmus/imported-successfully.png deleted file mode 100644 index 304b8d7fd..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/imported-successfully.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/litmus-nodeport.png b/static/images/docs/appstore/external-apps/deploy-litmus/litmus-nodeport.png deleted file mode 100644 index d7038a450..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/litmus-nodeport.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/litmus-running.png b/static/images/docs/appstore/external-apps/deploy-litmus/litmus-running.png deleted file mode 100644 index 579aeee5b..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/litmus-running.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/nginx-replica.png b/static/images/docs/appstore/external-apps/deploy-litmus/nginx-replica.png deleted file mode 100644 index 8ac2d4f20..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-litmus/nginx-replica.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/one-pod-left.png b/static/images/docs/appstore/external-apps/deploy-litmus/one-pod-left.png deleted file mode 100644 index a9a8050bd..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/one-pod-left.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/pod-cpu-hog.png b/static/images/docs/appstore/external-apps/deploy-litmus/pod-cpu-hog.png deleted file mode 100644 index 0d54497ce..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/pod-cpu-hog.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-litmus/terminate-and-recreate.png b/static/images/docs/appstore/external-apps/deploy-litmus/terminate-and-recreate.png deleted file mode 100644 index 2024fe118..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-litmus/terminate-and-recreate.png and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/add-metersphere-repo.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/add-metersphere-repo.PNG deleted file mode 100644 index 30a2d81cc..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/add-metersphere-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/add-repo.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/add-repo.PNG deleted file mode 100644 index 77812ad83..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/add-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/added-metersphere-repo.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/added-metersphere-repo.PNG deleted file mode 100644 index 
fdcde2333..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/added-metersphere-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/basic-info.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/basic-info.PNG deleted file mode 100644 index 325e250fa..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/basic-info.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/change-value.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/change-value.PNG deleted file mode 100644 index 8fe414db0..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/change-value.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/click-metersphere.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/click-metersphere.PNG deleted file mode 100644 index 146a31d90..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/click-metersphere.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/deploy-app.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/deploy-app.PNG deleted file mode 100644 index 50e229395..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/deploy-app.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/deployments-running.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/deployments-running.PNG deleted file mode 100644 index a88172389..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/deployments-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/from-app-templates.PNG 
b/static/images/docs/appstore/external-apps/deploy-metersphere/from-app-templates.PNG deleted file mode 100644 index 800936fdb..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/from-app-templates.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-running.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-running.PNG deleted file mode 100644 index e5cb0ab79..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-service.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-service.PNG deleted file mode 100644 index 85fb2ca4d..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/metersphere-service.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/statefulsets-running.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/statefulsets-running.PNG deleted file mode 100644 index 10072c91f..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/statefulsets-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-metersphere/view-config.PNG b/static/images/docs/appstore/external-apps/deploy-metersphere/view-config.PNG deleted file mode 100644 index f3d40e866..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-metersphere/view-config.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG deleted file mode 100644 index e8365da28..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG deleted file mode 100644 index 4473e332d..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG deleted file mode 100644 index c8684d62d..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG deleted file mode 100644 index e7c6a1853..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG deleted file mode 100644 index 8f8128e3b..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG deleted file mode 100644 index 716fd326e..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG deleted file mode 100644 index f62fc66a7..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG deleted file mode 100644 index f46e53dc5..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG deleted file mode 100644 index 6caad9bd8..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG deleted file mode 100644 index a0b160da2..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG deleted file mode 100644 index 979fdd890..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG deleted file mode 100644 index 74010da31..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG deleted file mode 100644 index 6898ad24e..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG deleted file mode 100644 index 3fb6cce53..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG deleted file mode 100644 index 9c202bbcb..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG deleted file mode 100644 index 51199c863..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG deleted file mode 100644 index 62a03cd08..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG deleted file mode 100644 index 63a74b182..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG deleted file mode 100644 index 3be364e72..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG deleted file mode 100644 index d372f6269..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG deleted file mode 100644 index 603936412..000000000 Binary 
files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG deleted file mode 100644 index 514b8b69b..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG deleted file mode 100644 index 6f1e09421..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG deleted file mode 100644 index a77e768bc..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG deleted file mode 100644 index b89bd6918..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG deleted file mode 100644 index 5a1104729..000000000 Binary files 
a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG deleted file mode 100644 index 8e0679b30..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG deleted file mode 100644 index 82d3797f9..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG and /dev/null differ diff --git a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG b/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG deleted file mode 100644 index eb623f927..000000000 Binary files a/static/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG and /dev/null differ diff --git a/static/images/docs/appstore/harbor/active_of_harbor.png b/static/images/docs/appstore/harbor/active_of_harbor.png deleted file mode 100644 index c3fa7fb50..000000000 Binary files a/static/images/docs/appstore/harbor/active_of_harbor.png and /dev/null differ diff --git a/static/images/docs/appstore/harbor/choose_app_from_store.png b/static/images/docs/appstore/harbor/choose_app_from_store.png deleted file mode 100644 index 079dffda7..000000000 Binary files a/static/images/docs/appstore/harbor/choose_app_from_store.png and /dev/null differ diff --git a/static/images/docs/appstore/harbor/config_of_harbor_deploy.png b/static/images/docs/appstore/harbor/config_of_harbor_deploy.png deleted 
file mode 100644 index 2c927b10b..000000000 Binary files a/static/images/docs/appstore/harbor/config_of_harbor_deploy.png and /dev/null differ diff --git a/static/images/docs/appstore/harbor/deploy_set_of_harbor.png b/static/images/docs/appstore/harbor/deploy_set_of_harbor.png deleted file mode 100644 index ec1272c17..000000000 Binary files a/static/images/docs/appstore/harbor/deploy_set_of_harbor.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/application-resources-monitoring/application-resources-monitoring.png b/static/images/docs/cluster-administration/application-resources-monitoring/application-resources-monitoring.png deleted file mode 100644 index 2a4e2c4f9..000000000 Binary files a/static/images/docs/cluster-administration/application-resources-monitoring/application-resources-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png b/static/images/docs/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png deleted file mode 100644 index 24321ed10..000000000 Binary files a/static/images/docs/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/application-resources-monitoring/time-range.png b/static/images/docs/cluster-administration/application-resources-monitoring/time-range.png deleted file mode 100644 index 9bc5904d8..000000000 Binary files a/static/images/docs/cluster-administration/application-resources-monitoring/time-range.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/application-resources-monitoring/usage-ranking.png b/static/images/docs/cluster-administration/application-resources-monitoring/usage-ranking.png deleted file mode 100644 index 5ba90ffc8..000000000 Binary files 
a/static/images/docs/cluster-administration/application-resources-monitoring/usage-ranking.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.jpg b/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.jpg deleted file mode 100644 index 63cd3f2c2..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png b/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png deleted file mode 100644 index fc458f5dd..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.jpg b/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.jpg deleted file mode 100644 index b034f187d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-cluster.jpg b/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-cluster.jpg deleted file mode 100644 index 89dc9ca78..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-cluster.jpg and /dev/null differ diff --git 
a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.jpg b/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.jpg deleted file mode 100644 index 66bf5fd6d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-es.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-es.png deleted file mode 100644 index 27cf1734f..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-es.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-receiver.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-receiver.png deleted file mode 100644 index 82239c3e1..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/add-receiver.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/receiver-list.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/receiver-list.png deleted file mode 100644 index 12208e356..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver/receiver-list.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-fluentd.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-fluentd.png deleted file mode 100644 
index 3495d2495..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-fluentd.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-receiver.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-receiver.png deleted file mode 100644 index 82239c3e1..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/add-receiver.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/container-logs.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/container-logs.png deleted file mode 100644 index f649dede3..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/container-logs.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/receiver-list.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/receiver-list.png deleted file mode 100644 index 12208e356..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/receiver-list.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/add-kafka.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/add-kafka.png deleted file mode 100644 index 268e6490e..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/add-kafka.png and /dev/null differ diff --git 
a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/change-status.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/change-status.png deleted file mode 100644 index 23c7ca3f5..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/change-status.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections-events.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections-events.png deleted file mode 100644 index d7587b02b..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections-events.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections.png deleted file mode 100644 index 1ad92dde4..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/log-collections.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/more.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/more.png deleted file mode 100644 index 2f61e2ef6..000000000 Binary files a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/more.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/receiver-status.png b/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/receiver-status.png deleted file mode 100644 index 6b6ed0d59..000000000 Binary files 
a/static/images/docs/cluster-administration/cluster-settings/log-collections/introduction/receiver-status.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png deleted file mode 100644 index e46441da2..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-management.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-management.png deleted file mode 100644 index 401f7521d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-management.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-nodes.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-nodes.png deleted file mode 100644 index 9939b2f9f..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-nodes.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png deleted file mode 100644 index 2b2268c94..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png deleted file mode 100644 index 25f2cba88..000000000 Binary files 
a/static/images/docs/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png b/static/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png deleted file mode 100644 index c5a6fc0fc..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/clusters-management.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/component-monitoring.jpg b/static/images/docs/cluster-administration/cluster-status-monitoring/component-monitoring.jpg deleted file mode 100644 index 97818400d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/component-monitoring.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-load-average.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-load-average.png deleted file mode 100644 index eb95cb65d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-load-average.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-utilization.png b/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-utilization.png deleted file mode 100644 index 087eb90d9..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/cpu-utilization.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/disk-throughput.png b/static/images/docs/cluster-administration/cluster-status-monitoring/disk-throughput.png deleted file mode 100644 index a83b8ec63..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/disk-throughput.png and /dev/null differ diff --git 
a/static/images/docs/cluster-administration/cluster-status-monitoring/disk-usage.png b/static/images/docs/cluster-administration/cluster-status-monitoring/disk-usage.png deleted file mode 100644 index 315afd624..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/disk-usage.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/edit-yaml.png b/static/images/docs/cluster-administration/cluster-status-monitoring/edit-yaml.png deleted file mode 100644 index 547ccf284..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/edit-yaml.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/etcd-monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/etcd-monitoring.png deleted file mode 100644 index 85de84d03..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/etcd-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/inode-utilization.png b/static/images/docs/cluster-administration/cluster-status-monitoring/inode-utilization.png deleted file mode 100644 index 1aef66b0c..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/inode-utilization.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/iops.png b/static/images/docs/cluster-administration/cluster-status-monitoring/iops.png deleted file mode 100644 index 1ff1df29b..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/iops.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/memory-utilization.png b/static/images/docs/cluster-administration/cluster-status-monitoring/memory-utilization.png deleted file mode 100644 index 
fdbf667af..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/memory-utilization.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/monitoring.png deleted file mode 100644 index c43737b11..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/netework-bandwidth.png b/static/images/docs/cluster-administration/cluster-status-monitoring/netework-bandwidth.png deleted file mode 100644 index 4d49eccc9..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/netework-bandwidth.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/node-usage-ranking.png b/static/images/docs/cluster-administration/cluster-status-monitoring/node-usage-ranking.png deleted file mode 100644 index 3a6b42049..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/node-usage-ranking.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png deleted file mode 100644 index c8029151c..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/pod-status.png b/static/images/docs/cluster-administration/cluster-status-monitoring/pod-status.png deleted file mode 100644 index 6e613d13d..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/pod-status.png and 
/dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png b/static/images/docs/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png deleted file mode 100644 index 4b83310f2..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/service-components-status.png b/static/images/docs/cluster-administration/cluster-status-monitoring/service-components-status.png deleted file mode 100644 index 9e3a4d3af..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/service-components-status.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring/status.png b/static/images/docs/cluster-administration/cluster-status-monitoring/status.png deleted file mode 100644 index 0d414707f..000000000 Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring/status.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-messages-node-level/alert-message-page.png b/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-messages-node-level/alert-message-page.png deleted file mode 100644 index 556f26dd1..000000000 Binary files a/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-messages-node-level/alert-message-page.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/alerting-policy-details-page.png b/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/alerting-policy-details-page.png deleted file mode 100644 index d54d143f0..000000000 Binary 
files a/static/images/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policies-node-level/alerting-policy-details-page.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/add-taints.jpg b/static/images/docs/cluster-administration/node-management/add-taints.jpg deleted file mode 100644 index d401171b7..000000000 Binary files a/static/images/docs/cluster-administration/node-management/add-taints.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/assign_pods_to_node.jpg b/static/images/docs/cluster-administration/node-management/assign_pods_to_node.jpg deleted file mode 100644 index d0f4c7144..000000000 Binary files a/static/images/docs/cluster-administration/node-management/assign_pods_to_node.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/clusters-management-select.jpg b/static/images/docs/cluster-administration/node-management/clusters-management-select.jpg deleted file mode 100644 index 8638f8fb2..000000000 Binary files a/static/images/docs/cluster-administration/node-management/clusters-management-select.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/drop-down-list-node.jpg b/static/images/docs/cluster-administration/node-management/drop-down-list-node.jpg deleted file mode 100644 index 8e0bb4902..000000000 Binary files a/static/images/docs/cluster-administration/node-management/drop-down-list-node.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/label_node.jpg b/static/images/docs/cluster-administration/node-management/label_node.jpg deleted file mode 100644 index a6bb4d54e..000000000 Binary files a/static/images/docs/cluster-administration/node-management/label_node.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/node_detail.png 
b/static/images/docs/cluster-administration/node-management/node_detail.png deleted file mode 100644 index 0b20588bc..000000000 Binary files a/static/images/docs/cluster-administration/node-management/node_detail.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/node_status.png b/static/images/docs/cluster-administration/node-management/node_status.png deleted file mode 100644 index 32ebb6601..000000000 Binary files a/static/images/docs/cluster-administration/node-management/node_status.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/node-management/select-a-cluster.jpg b/static/images/docs/cluster-administration/node-management/select-a-cluster.jpg deleted file mode 100644 index e776a1501..000000000 Binary files a/static/images/docs/cluster-administration/node-management/select-a-cluster.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png deleted file mode 100644 index fe52bf77a..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png deleted file mode 100644 index b09feab40..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.png 
b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.png deleted file mode 100644 index a7352b88b..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg deleted file mode 100644 index 7801cfe97..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png deleted file mode 100644 index 30a55508b..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.png deleted file mode 100644 index fe52bf77a..000000000 Binary files a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/email-server.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/email-server.png deleted file mode 100644 index 948c7caa4..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/email-server.png and /dev/null differ diff --git 
a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png deleted file mode 100644 index 9b358b25f..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png deleted file mode 100644 index 5c04d7833..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/example-notification.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/example-notification.png deleted file mode 100644 index 9c143dbb9..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/example-notification.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png deleted file mode 100644 index b23bfec12..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png 
b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png deleted file mode 100644 index 91f22fffa..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png and /dev/null differ diff --git a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png b/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png deleted file mode 100644 index f34b20499..000000000 Binary files a/static/images/docs/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png and /dev/null differ diff --git a/static/images/docs/common-icons/hammer.png b/static/images/docs/common-icons/hammer.png new file mode 100644 index 000000000..e78ac049a Binary files /dev/null and b/static/images/docs/common-icons/hammer.png differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/plus-button.png b/static/images/docs/common-icons/invite-member-button.png similarity index 100% rename from static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/plus-button.png rename to static/images/docs/common-icons/invite-member-button.png diff --git a/static/images/docs/common-icons/replica-minus-icon.png b/static/images/docs/common-icons/replica-minus-icon.png new file mode 100644 index 000000000..304bf2f91 Binary files /dev/null and b/static/images/docs/common-icons/replica-minus-icon.png differ diff --git a/static/images/docs/common-icons/replica-plus-icon.png b/static/images/docs/common-icons/replica-plus-icon.png new file mode 100644 index 000000000..3230c190d Binary files /dev/null and b/static/images/docs/common-icons/replica-plus-icon.png differ diff --git 
a/static/images/docs/common-icons/slider.png b/static/images/docs/common-icons/slider.png new file mode 100644 index 000000000..f4da17eab Binary files /dev/null and b/static/images/docs/common-icons/slider.png differ diff --git a/static/images/docs/common-icons/three-dots.png b/static/images/docs/common-icons/three-dots.png new file mode 100644 index 000000000..4ef8b9b46 Binary files /dev/null and b/static/images/docs/common-icons/three-dots.png differ diff --git a/static/images/docs/common-icons/trashcan.png b/static/images/docs/common-icons/trashcan.png new file mode 100644 index 000000000..9be04a4e8 Binary files /dev/null and b/static/images/docs/common-icons/trashcan.png differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/edit-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/edit-jenkinsfile.png deleted file mode 100644 index 2cbdc33e9..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/run-maven-pipeline.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/run-maven-pipeline.png deleted file mode 100644 index e9f372be8..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/run-maven-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-credential-list.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-credential-list.png deleted file mode 100644 index 74b878fd0..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-credential-list.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-edit-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-edit-jenkinsfile.png deleted file mode 100644 index eef9e2190..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-namespace.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-namespace.png deleted file mode 100644 index e55cc9844..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-namespace.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-pipeline.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-pipeline.png deleted file mode 100644 index a6db8be91..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload-svc.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload-svc.png deleted file mode 100644 index fd9204671..000000000 Binary files a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload-svc.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload.png b/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload.png deleted file mode 100644 index d8375ab82..000000000 Binary files 
a/static/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg deleted file mode 100644 index 39919c6f7..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.png deleted file mode 100644 index 2f9827ec1..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg deleted file mode 100644 index 39c546cae..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.png deleted file mode 100644 index 2c97c5f9d..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.png deleted file mode 100644 index 569c3812f..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg deleted file mode 100644 index 1ce844ec7..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg deleted file mode 100644 index d9fb0870c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg deleted file mode 100644 index f786fa0c2..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg 
b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg deleted file mode 100644 index 8bdd0776a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg deleted file mode 100644 index 27d46312c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg deleted file mode 100644 index 623408a6a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg deleted file mode 100644 index 1c5bcc800..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.png deleted file mode 100644 index dbc48aa72..000000000 Binary files 
a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png deleted file mode 100644 index f416d701c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg deleted file mode 100644 index 525d4df20..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg deleted file mode 100644 index a0e945fc6..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.png deleted file mode 100644 index dd692f9cb..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg deleted file mode 100644 index 9d553876f..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg deleted file mode 100644 index 39919c6f7..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.png deleted file mode 100644 index 5f5b711e4..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg deleted file mode 100644 index 39c546cae..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.png deleted file mode 100644 index b1b117757..000000000 Binary files 
a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.png deleted file mode 100644 index 063063675..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg deleted file mode 100644 index 1ce844ec7..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg deleted file mode 100644 index d9fb0870c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg deleted file mode 100644 index f786fa0c2..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg deleted file mode 100644 index 
8bdd0776a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg deleted file mode 100644 index 27d46312c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg deleted file mode 100644 index 623408a6a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg deleted file mode 100644 index 1c5bcc800..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.png deleted file mode 100644 index c51aa1a41..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.png 
deleted file mode 100644 index 18577cf6a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.png deleted file mode 100644 index a6450acaa..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.png deleted file mode 100644 index a9d179f96..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.png deleted file mode 100644 index 34823419a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-devops-project.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-devops-project.png deleted file mode 100644 index 62035dd48..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-devops-project.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-workspace.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-workspace.png 
deleted file mode 100644 index 7adb76aad..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/create-workspace.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/credentials-created.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/credentials-created.png deleted file mode 100644 index cae505d12..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/credentials-created.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/deploy-to-staging.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/deploy-to-staging.png deleted file mode 100644 index e205354d1..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/deploy-to-staging.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project-created.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project-created.png deleted file mode 100644 index bc67e922a..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project-created.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project.png deleted file mode 100644 index 2ac5fa5ed..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/devops-project.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/host-pods.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/host-pods.png deleted file mode 
100644 index 7b6d71424..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/host-pods.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-created.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-created.png deleted file mode 100644 index 738315da6..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-created.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-logs.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-logs.png deleted file mode 100644 index 9596b033c..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-logs.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-name.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-name.png deleted file mode 100644 index 409d76060..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-panel.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-panel.png deleted file mode 100644 index 6da083761..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-panel.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-success.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-success.png deleted file mode 100644 index 47ede9e33..000000000 Binary files 
a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/pipeline-success.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/rohan-pods.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/rohan-pods.png deleted file mode 100644 index 264c7d5ca..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/rohan-pods.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/select-all-clusters.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/select-all-clusters.png deleted file mode 100644 index 3da39bebb..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/select-all-clusters.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/shire-pods.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/shire-pods.png deleted file mode 100644 index 21f7888ba..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/shire-pods.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/sonarqube-result.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/sonarqube-result.png deleted file mode 100644 index 5a6c12ba5..000000000 Binary files a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/sonarqube-result.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/workspace-created.png b/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/workspace-created.png deleted file mode 100644 index 614d62341..000000000 Binary files 
a/static/images/docs/devops-user-guide/examples/create-multi-cluster-pipeline/workspace-created.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png deleted file mode 100644 index 838f35ec4..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png deleted file mode 100644 index 6266c01b2..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png deleted file mode 100644 index 2229a5403..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png deleted file mode 100644 index 4431d2bb8..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png deleted file mode 100644 index cb2822673..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png deleted file mode 100644 index 54f84d84b..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png deleted file mode 100644 index a66536290..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png deleted file mode 100644 index 90fddb7c4..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png deleted file mode 100644 index b5b0982dc..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png deleted file mode 100644 index 0cd79d1fa..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png 
b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png deleted file mode 100644 index 83a048a85..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png b/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png deleted file mode 100644 index fcbadaae9..000000000 Binary files a/static/images/docs/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/advanced-settings.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/advanced-settings.png deleted file mode 100644 index 4a2346d98..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/basic-info.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/basic-info.png deleted file mode 100644 index b16863880..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/basic-info.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-credentials.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-credentials.png deleted file mode 100644 index e3a5c4767..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-credentials.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-pipeline.png 
b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-pipeline.png deleted file mode 100644 index 84fa4e4ed..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/credentials-page.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/credentials-page.png deleted file mode 100644 index 0b66d3135..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/credentials-page.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/edit-jenkinsfile.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/edit-jenkinsfile.png deleted file mode 100644 index f07043e67..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/export-to-file.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/export-to-file.png deleted file mode 100644 index ba478c7c1..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/export-to-file.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/harbor-projects.jpg b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/harbor-projects.jpg deleted file mode 100644 index 0f7781233..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/harbor-projects.jpg and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/image-pushed.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/image-pushed.png deleted file mode 100644 index a6fde17aa..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/image-pushed.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account-name.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account-name.png deleted file mode 100644 index a870d740f..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account.png deleted file mode 100644 index 5ee85c982..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/set-name.png b/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/set-name.png deleted file mode 100644 index eae840c60..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/set-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/access-sonarqube-console.jpg b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/access-sonarqube-console.jpg deleted file mode 100644 index 2cc6ea70d..000000000 Binary files 
a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/access-sonarqube-console.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/add-sonarqube.png b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/add-sonarqube.png deleted file mode 100644 index 228ca5404..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/add-sonarqube.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/configure-system.png b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/configure-system.png deleted file mode 100644 index b23f5487d..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/configure-system.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/jenkins-login-page.jpg b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/jenkins-login-page.jpg deleted file mode 100644 index 02ab77836..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/jenkins-login-page.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/log-in-page.jpg b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/log-in-page.jpg deleted file mode 100644 index a42666108..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/log-in-page.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/manage-jenkins.png 
b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/manage-jenkins.png deleted file mode 100644 index 5fb3ead77..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-view-result.jpg b/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-view-result.jpg deleted file mode 100644 index 06f023cd1..000000000 Binary files a/static/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-view-result.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops.png deleted file mode 100644 index 558e59a84..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page.png deleted file mode 100644 index 9fd0a4fdc..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list.png deleted file mode 100644 index 6c44411ab..000000000 Binary files 
a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create.png deleted file mode 100644 index 40d37b272..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info.png deleted file mode 100644 index badda2a10..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list.png deleted file mode 100644 index 62b0db84e..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.jpg b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.jpg deleted file mode 100644 index 6e215d91c..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.jpg and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_invite_member.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_invite_member.png deleted file mode 100644 index 23627f36a..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_invite_member.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_list.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_list.png deleted file mode 100644 index 3e001adcc..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step1.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step1.png deleted file mode 100644 index 2b5a4c417..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step2.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step2.png deleted file mode 100644 index 17a3fa2ea..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_role_step2.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_user_edit.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_user_edit.png deleted file mode 100644 index 93feead12..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops_user_edit.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/plus-button.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/plus-button.png deleted file mode 100644 index 1d9d6efbc..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/plus-button.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png b/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png deleted file mode 100644 index 37c20ebd2..000000000 Binary files a/static/images/docs/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.png deleted file mode 100644 index adb0eb523..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/activity-faliure.png 
b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/activity-faliure.png deleted file mode 100644 index 73a890660..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/activity-faliure.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg deleted file mode 100644 index 882f3b231..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting1.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting1.png deleted file mode 100644 index 25bd48bc5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/advanced-setting1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings-1.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings-1.png deleted file mode 100644 index ffa6ce58a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings-1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg deleted file mode 100644 index 7bb09f50e..000000000 Binary files 
a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/click-service.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/click-service.png deleted file mode 100644 index afa69ffc7..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/click-service.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/code-quality.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/code-quality.png deleted file mode 100644 index 5cf698aa0..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/code-quality.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg deleted file mode 100644 index 3c16d46e4..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-a-pipeline1.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-a-pipeline1.png deleted file mode 100644 index b44e991ec..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-a-pipeline1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.png 
b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.png deleted file mode 100644 index 434ea75ca..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg deleted file mode 100644 index ada998195..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.png deleted file mode 100644 index 29a9dec65..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.png deleted file mode 100644 index 5c4b2d56a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.png deleted file mode 100644 index 7794e7fbb..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.png and 
/dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg deleted file mode 100644 index 3846706bc..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/github-result.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/github-result.png deleted file mode 100644 index 78846680b..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/github-result.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.png deleted file mode 100644 index 7e15429f9..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg deleted file mode 100644 index 838871b70..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.png 
deleted file mode 100644 index e39540f88..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg deleted file mode 100644 index 36448ba88..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.png deleted file mode 100644 index e59ddb10e..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.png deleted file mode 100644 index ae87131a2..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.png deleted file mode 100644 index 7357cf1f1..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.png deleted file mode 100644 index c070febb8..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/project-list.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/project-list.png deleted file mode 100644 index 6f116b40e..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/project-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.png deleted file mode 100644 index 872e0df70..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-repo.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-repo.png deleted file mode 100644 index c0864af8a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-repo.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-token.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-token.png deleted file mode 100644 index 111e5985d..000000000 Binary 
files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/select-token.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg deleted file mode 100644 index 613e98de6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.png deleted file mode 100644 index ac077056b..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg deleted file mode 100644 index 98a866794..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/access-service.jpg b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/access-service.jpg deleted file mode 100644 index f1d0e7edc..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/access-service.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/complete.png 
b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/complete.png deleted file mode 100644 index 646b2167c..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/complete.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/inspect-logs.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/inspect-logs.png deleted file mode 100644 index 12725bc50..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/inspect-logs.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/kubernetesDeploy.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/kubernetesDeploy.png deleted file mode 100644 index f0be8a034..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/kubernetesDeploy.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-done.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-done.png deleted file mode 100644 index 5e4a126c3..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-done.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/service-exposed.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/service-exposed.png deleted file mode 100644 index cdaf723bd..000000000 Binary files 
a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/service-exposed.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell.png deleted file mode 100644 index be6e5f3d5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/shell.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/unit-test-set.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/unit-test-set.png deleted file mode 100644 index 6b82c004f..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/unit-test-set.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/view-deployment.png b/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/view-deployment.png deleted file mode 100644 index d99509aed..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/view-deployment.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-page.png b/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-page.png deleted file mode 100644 index 04ea9ef0a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-page.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-step1.png 
b/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-step1.png deleted file mode 100644 index 8882744d5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/create-credential-step1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/credential-detail-page.png b/static/images/docs/devops-user-guide/using-devops/credential-management/credential-detail-page.png deleted file mode 100644 index 0bae89944..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/credential-detail-page.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/credentials-list.png b/static/images/docs/devops-user-guide/using-devops/credential-management/credentials-list.png deleted file mode 100644 index 9e53a8ee3..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/credentials-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/dockerhub-credentials.png b/static/images/docs/devops-user-guide/using-devops/credential-management/dockerhub-credentials.png deleted file mode 100644 index 1b3a9e321..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/dockerhub-credentials.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/credential-management/edit-credentials.png b/static/images/docs/devops-user-guide/using-devops/credential-management/edit-credentials.png deleted file mode 100644 index 72aac20b6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/credential-management/edit-credentials.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/check-log.png 
b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/check-log.png deleted file mode 100644 index a7a2f23f4..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/check-log.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-import-project.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-import-project.png deleted file mode 100644 index 1cc58f9e7..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-import-project.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-jenkinsfile.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-jenkinsfile.png deleted file mode 100644 index b2a2d39c4..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-run.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-run.png deleted file mode 100644 index b47d82272..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/click-run.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/commit-changes.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/commit-changes.png deleted file mode 100644 index 5b5c81b9a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/commit-changes.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/create-pipeline.png 
b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/create-pipeline.png deleted file mode 100644 index 45a35bcc3..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/create-pipeline.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/credential-created.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/credential-created.png deleted file mode 100644 index ecd373c9d..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/credential-created.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/deployment.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/deployment.png deleted file mode 100644 index 65d758525..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/deployment.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/docker-image.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/docker-image.png deleted file mode 100644 index 0ad871ac7..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/docker-image.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/gitlab-result.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/gitlab-result.png deleted file mode 100644 index 6cf758c50..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/gitlab-result.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png 
b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png deleted file mode 100644 index 6542561b1..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/new-branch.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/new-branch.png deleted file mode 100644 index 8e197f35c..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/new-branch.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/pipeline-logs.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/pipeline-logs.png deleted file mode 100644 index 5ec0a04f6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/pipeline-logs.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-branch.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-branch.png deleted file mode 100644 index e036dd757..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-branch.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-gitlab.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-gitlab.png deleted file mode 100644 index 1c5601ab0..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/select-gitlab.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/service.png 
b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/service.png deleted file mode 100644 index 416ade980..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/service.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/use-git-url.png b/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/use-git-url.png deleted file mode 100644 index 3db6292a1..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/gitlab-multibranch-pipeline/use-git-url.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email-3.jpg b/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email-3.jpg deleted file mode 100644 index a556e8cf9..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email-3.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email.png b/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email.png new file mode 100644 index 000000000..76fe42f52 Binary files /dev/null and b/static/images/docs/devops-user-guide/using-devops/jenkins-email/set-jenkins-email.png differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-email/three-dots.png b/static/images/docs/devops-user-guide/using-devops/jenkins-email/three-dots.png deleted file mode 100644 index bd841635e..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-email/three-dots.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-email/workloads-list.png b/static/images/docs/devops-user-guide/using-devops/jenkins-email/workloads-list.png deleted file mode 100644 index cada1e8b4..000000000 Binary files 
a/static/images/docs/devops-user-guide/using-devops/jenkins-email/workloads-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-add.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-add.png deleted file mode 100644 index ee617cd4c..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-add.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-configure.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-configure.png deleted file mode 100644 index d97fab861..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-configure.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-create.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-create.png deleted file mode 100644 index a4e3003d4..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-create.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-run.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-run.png deleted file mode 100644 index 8ec5e21b5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/click-run.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/configure-shared-library.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/configure-shared-library.png deleted file mode 100644 index c7270abfa..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/configure-shared-library.png and /dev/null differ 
diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/edit-jenkinsfile.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/edit-jenkinsfile.png deleted file mode 100644 index a611a3ef7..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/log-details.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/log-details.png deleted file mode 100644 index 483c75c1c..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/log-details.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/run-successfully.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/run-successfully.png deleted file mode 100644 index 35912fbe6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/run-successfully.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/set-name.png b/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/set-name.png deleted file mode 100644 index 641e56257..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-shared-library/set-name.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/apply-config.png b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/apply-config.png deleted file mode 100644 index 1944ed843..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/apply-config.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/configuration-as-code.png 
b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/configuration-as-code.png deleted file mode 100644 index a76577429..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/configuration-as-code.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-configmap.png b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-configmap.png deleted file mode 100644 index e8ffa3111..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-configmap.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-jenkins.png b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-jenkins.png deleted file mode 100644 index 343f5910a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/edit-jenkins.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/jenkins-dashboard.jpg b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/jenkins-dashboard.jpg deleted file mode 100644 index ee22aa657..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/jenkins-dashboard.jpg and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/manage-jenkins.png b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/manage-jenkins.png deleted file mode 100644 index fbbd4cef2..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/more-list.png 
b/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/more-list.png deleted file mode 100644 index 573c24b77..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/jenkins-system-settings/more-list.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/basic-info-tab.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/basic-info-tab.png deleted file mode 100644 index bf924f50f..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/basic-info-tab.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/behavioral-strategy.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/behavioral-strategy.png deleted file mode 100644 index 0fd09b935..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/behavioral-strategy.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/branch-settings.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/branch-settings.png deleted file mode 100644 index 1c3d0f0f5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/branch-settings.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-settings.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-settings.png deleted file mode 100644 index e9cf5437e..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-settings.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger-2.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger-2.png deleted file mode 100644 index 6d033079e..000000000 Binary files 
a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger-2.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger.png deleted file mode 100644 index a700a5868..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/build-trigger.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-bitbucket.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-bitbucket.png deleted file mode 100644 index d277cc36c..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-bitbucket.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-git.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-git.png deleted file mode 100644 index a0a128eab..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-git.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-github.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-github.png deleted file mode 100644 index 3f73da2ff..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-github.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-gitlab.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-gitlab.png deleted file mode 100644 index 8a84c4728..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-gitlab.png and /dev/null differ diff --git 
a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-svn.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-svn.png deleted file mode 100644 index 00426178d..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/code-source-svn.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/git-clone-options.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/git-clone-options.png deleted file mode 100644 index 1d7b27208..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/git-clone-options.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/parametric-build.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/parametric-build.png deleted file mode 100644 index b6b85b6fc..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/parametric-build.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/scan-repo-trigger.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/scan-repo-trigger.png deleted file mode 100644 index 8c113c538..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/scan-repo-trigger.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/script-path.png b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/script-path.png deleted file mode 100644 index 492eae786..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/script-path.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/webhook-push.png 
b/static/images/docs/devops-user-guide/using-devops/pipeline-settings/webhook-push.png deleted file mode 100644 index aa34ab4d7..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-settings/webhook-push.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/add-webhook.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/add-webhook.png deleted file mode 100644 index df5fb562f..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/add-webhook.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-add-webhook.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-add-webhook.png deleted file mode 100644 index 1016a0c9f..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-add-webhook.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-file.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-file.png deleted file mode 100644 index 4c26dba5a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-file.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-sonar.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-sonar.png deleted file mode 100644 index 8acdabad6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/click-sonar.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/delivery-detail.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/delivery-detail.png deleted file mode 100644 index 7bf47c85f..000000000 Binary files 
a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/delivery-detail.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-config.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-config.png deleted file mode 100644 index 1735a9476..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-config.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-file.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-file.png deleted file mode 100644 index 5a817c061..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/edit-file.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pipeline-triggered.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pipeline-triggered.png deleted file mode 100644 index 97531e744..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pipeline-triggered.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pods.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pods.png deleted file mode 100644 index c7031fb7d..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/pods.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-push.png b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-push.png deleted file mode 100644 index 6627d753a..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-push.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-ready.png 
b/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-ready.png deleted file mode 100644 index 35b3eb533..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/pipeline-webhook/webhook-ready.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-1.png b/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-1.png deleted file mode 100644 index 060b598b5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-1.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-2.png b/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-2.png deleted file mode 100644 index b8ffd30f6..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-2.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-3.png b/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-3.png deleted file mode 100644 index 70705b2b5..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-3.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-4.png b/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-4.png deleted file mode 100644 index 47fd5b2c2..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-4.png and /dev/null differ diff --git a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-5.png 
b/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-5.png deleted file mode 100644 index 35bf4dcbd..000000000 Binary files a/static/images/docs/devops-user-guide/using-devops/set-ci-node-for-dependency-cache/set-node-5.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubeedge/edge-nodes.png b/static/images/docs/enable-pluggable-components/kubeedge/edge-nodes.png deleted file mode 100644 index 0107e9ba1..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubeedge/edge-nodes.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-alerting/alerting-section.png b/static/images/docs/enable-pluggable-components/kubesphere-alerting/alerting-section.png deleted file mode 100644 index 8221199bf..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-alerting/alerting-section.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-app-store/app-store.png b/static/images/docs/enable-pluggable-components/kubesphere-app-store/app-store.png deleted file mode 100644 index 8d42a7c8e..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-app-store/app-store.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png b/static/images/docs/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png deleted file mode 100644 index 3829286ec..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-devops-system/devops.png b/static/images/docs/enable-pluggable-components/kubesphere-devops-system/devops.png deleted file mode 100644 index f6895aa33..000000000 Binary files 
a/static/images/docs/enable-pluggable-components/kubesphere-devops-system/devops.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-events/event-search.png b/static/images/docs/enable-pluggable-components/kubesphere-events/event-search.png deleted file mode 100644 index 94d229a9b..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-events/event-search.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-logging-system/logging.png b/static/images/docs/enable-pluggable-components/kubesphere-logging-system/logging.png deleted file mode 100644 index 4105ca80d..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-logging-system/logging.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/kubesphere-service-mesh/istio.png b/static/images/docs/enable-pluggable-components/kubesphere-service-mesh/istio.png deleted file mode 100644 index 9ccc9e845..000000000 Binary files a/static/images/docs/enable-pluggable-components/kubesphere-service-mesh/istio.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/network-policies/networkpolicy.png b/static/images/docs/enable-pluggable-components/network-policies/networkpolicy.png deleted file mode 100644 index 0e05c71e5..000000000 Binary files a/static/images/docs/enable-pluggable-components/network-policies/networkpolicy.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png b/static/images/docs/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png deleted file mode 100644 index aa901e512..000000000 Binary files a/static/images/docs/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png and /dev/null differ diff --git a/static/images/docs/enable-pluggable-components/service-topology/topology.png 
b/static/images/docs/enable-pluggable-components/service-topology/topology.png deleted file mode 100644 index 83a4a78a8..000000000 Binary files a/static/images/docs/enable-pluggable-components/service-topology/topology.png and /dev/null differ diff --git a/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/project-page.png b/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/project-page.png deleted file mode 100644 index 6291c866e..000000000 Binary files a/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/project-page.png and /dev/null differ diff --git a/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/workspace-project.png b/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/workspace-project.png deleted file mode 100644 index 0802f9f52..000000000 Binary files a/static/images/docs/faq/access-control-and-account-management/add-exisiting-namespaces-to-a-kubesphere-workspace/workspace-project.png and /dev/null differ diff --git a/static/images/docs/faq/applications/remove-built-in-apps/activate_tomcat.png b/static/images/docs/faq/applications/remove-built-in-apps/activate_tomcat.png deleted file mode 100644 index 95f3803bb..000000000 Binary files a/static/images/docs/faq/applications/remove-built-in-apps/activate_tomcat.png and /dev/null differ diff --git a/static/images/docs/faq/applications/remove-built-in-apps/click_tomcat.png b/static/images/docs/faq/applications/remove-built-in-apps/click_tomcat.png deleted file mode 100644 index d29aee0c0..000000000 Binary files a/static/images/docs/faq/applications/remove-built-in-apps/click_tomcat.png and /dev/null differ diff --git a/static/images/docs/faq/applications/remove-built-in-apps/confirm_suspend.png 
b/static/images/docs/faq/applications/remove-built-in-apps/confirm_suspend.png deleted file mode 100644 index 63f10e3d1..000000000 Binary files a/static/images/docs/faq/applications/remove-built-in-apps/confirm_suspend.png and /dev/null differ diff --git a/static/images/docs/faq/applications/remove-built-in-apps/suspend_tomcat.png b/static/images/docs/faq/applications/remove-built-in-apps/suspend_tomcat.png deleted file mode 100644 index eb2f0259b..000000000 Binary files a/static/images/docs/faq/applications/remove-built-in-apps/suspend_tomcat.png and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/confirm-delete.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/confirm-delete.PNG deleted file mode 100644 index 601ad7423..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/confirm-delete.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-redis-1.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-redis-1.PNG deleted file mode 100644 index 772586e42..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-redis-1.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-secret.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-secret.PNG deleted file mode 100644 index 1334137d0..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/delete-secret.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/error-prompt.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/error-prompt.PNG deleted file mode 100644 index 562caf5f5..000000000 Binary files 
a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/error-prompt.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/new-redis-app.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/new-redis-app.PNG deleted file mode 100644 index f955d494f..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/new-redis-app.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/redis-1.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/redis-1.PNG deleted file mode 100644 index 3d3c14d7c..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/redis-1.PNG and /dev/null differ diff --git a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/search-secret.PNG b/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/search-secret.PNG deleted file mode 100644 index ae2e98f33..000000000 Binary files a/static/images/docs/faq/applications/use-the-same-app-name-after-deletion/search-secret.PNG and /dev/null differ diff --git a/static/images/docs/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png b/static/images/docs/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png deleted file mode 100644 index fa6200777..000000000 Binary files a/static/images/docs/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png and /dev/null differ diff --git a/static/images/docs/faq/devops/install-plugins-to-jenkins/available-plugins.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/available-plugins.png deleted file mode 100644 index b1711a8fe..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/available-plugins.png and /dev/null differ diff --git 
a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png deleted file mode 100644 index c43c1fa96..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png and /dev/null differ diff --git a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png deleted file mode 100644 index bb9ffe66d..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png deleted file mode 100644 index b9edf73db..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png and /dev/null differ diff --git a/static/images/docs/faq/devops/install-plugins-to-jenkins/installed-plugins.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/installed-plugins.png deleted file mode 100644 index 14bdfd247..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/installed-plugins.png and /dev/null differ diff --git a/static/images/docs/faq/devops/install-plugins-to-jenkins/update-plugins.png b/static/images/docs/faq/devops/install-plugins-to-jenkins/update-plugins.png deleted file mode 100644 index 3cfb7ba0e..000000000 Binary files a/static/images/docs/faq/devops/install-plugins-to-jenkins/update-plugins.png and /dev/null differ diff --git a/static/images/docs/faq/forgot-password/modify-password.png b/static/images/docs/faq/forgot-password/modify-password.png deleted file mode 100644 index 8a67b09f1..000000000 Binary files a/static/images/docs/faq/forgot-password/modify-password.png and /dev/null differ diff --git 
a/static/images/docs/huawei-cce/en/login-ks-console.png b/static/images/docs/huawei-cce/en/login-ks-console.png deleted file mode 100644 index 9c24c783e..000000000 Binary files a/static/images/docs/huawei-cce/en/login-ks-console.png and /dev/null differ diff --git a/static/images/docs/huawei-cce/en/view-ks-console-full.png b/static/images/docs/huawei-cce/en/view-ks-console-full.png deleted file mode 100644 index 9bf4727b5..000000000 Binary files a/static/images/docs/huawei-cce/en/view-ks-console-full.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-added.png b/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-added.png deleted file mode 100644 index c571d093f..000000000 Binary files a/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-added.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-dialog.png b/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-dialog.png deleted file mode 100644 index 1b4e6ddc5..000000000 Binary files a/static/images/docs/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-dialog.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png b/static/images/docs/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png deleted file mode 100644 index 9904c3da5..000000000 Binary files a/static/images/docs/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/faq/configure-booster/booster-url.png b/static/images/docs/installing-on-linux/faq/configure-booster/booster-url.png deleted file mode 100644 index 3218a4fcd..000000000 Binary files a/static/images/docs/installing-on-linux/faq/configure-booster/booster-url.png and /dev/null differ 
diff --git a/static/images/docs/installing-on-linux/faq/configure-booster/container-registry.png b/static/images/docs/installing-on-linux/faq/configure-booster/container-registry.png deleted file mode 100644 index 7d4d593d3..000000000 Binary files a/static/images/docs/installing-on-linux/faq/configure-booster/container-registry.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/faq/configure-booster/image-booster.png b/static/images/docs/installing-on-linux/faq/configure-booster/image-booster.png deleted file mode 100644 index e2118f259..000000000 Binary files a/static/images/docs/installing-on-linux/faq/configure-booster/image-booster.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png b/static/images/docs/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png deleted file mode 100644 index 4eb3282d6..000000000 Binary files a/static/images/docs/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/on-premises/cluster-management.png b/static/images/docs/installing-on-linux/on-premises/cluster-management.png deleted file mode 100644 index 5aeea0e7d..000000000 Binary files a/static/images/docs/installing-on-linux/on-premises/cluster-management.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/on-premises/service-components.png b/static/images/docs/installing-on-linux/on-premises/service-components.png deleted file mode 100644 index fe3dddd41..000000000 Binary files a/static/images/docs/installing-on-linux/on-premises/service-components.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png 
b/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png deleted file mode 100644 index 60688c2f3..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png b/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png deleted file mode 100644 index a887b2ec9..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png b/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png deleted file mode 100644 index b4402bf88..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png b/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png deleted file mode 100644 index 7510ad510..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png and /dev/null differ diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png b/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png deleted file mode 100644 index 8ef47448c..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png and /dev/null differ 
diff --git a/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png b/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png deleted file mode 100644 index 760ba68aa..000000000 Binary files a/static/images/docs/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.png deleted file mode 100644 index 4f60539bb..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.png deleted file mode 100644 index 13cb7e52f..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.png deleted file mode 100644 index 611ee056e..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.png and /dev/null differ diff --git 
a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png deleted file mode 100644 index f33751e96..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.png deleted file mode 100644 index 4f60539bb..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.png deleted file mode 100644 index ce91c2507..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.png deleted file mode 100644 index 611ee056e..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.png and /dev/null differ diff --git 
a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.png b/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.png deleted file mode 100644 index 2737739b0..000000000 Binary files a/static/images/docs/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png deleted file mode 100644 index ba5b07a35..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png deleted file mode 100644 index 0be32732c..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png deleted file mode 100644 index 53f50862c..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png deleted file mode 100644 index 20ddd0e95..000000000 Binary files 
a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png deleted file mode 100644 index e1f3c29ed..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png deleted file mode 100644 index e77516a5e..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png deleted file mode 100644 index 0be32732c..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png deleted file mode 100644 index 53f50862c..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png deleted file mode 100644 index c9dc927ce..000000000 Binary files 
a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png deleted file mode 100644 index fa0498763..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png deleted file mode 100644 index 76462cd48..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png deleted file mode 100644 index e1f3c29ed..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png deleted file mode 100644 index 0be32732c..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png deleted file mode 100644 index 53f50862c..000000000 Binary files 
a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png deleted file mode 100644 index 7445846fd..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png deleted file mode 100644 index 174098df2..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png deleted file mode 100644 index e1f3c29ed..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png b/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png deleted file mode 100644 index 922bbdb64..000000000 Binary files a/static/images/docs/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png and /dev/null differ diff --git a/static/images/docs/multicluster-management/introduction/kubesphere-federation/central-control-plane.png b/static/images/docs/multicluster-management/introduction/kubesphere-federation/central-control-plane.png deleted file mode 100644 index 9d50d1c85..000000000 Binary files 
a/static/images/docs/multicluster-management/introduction/kubesphere-federation/central-control-plane.png and /dev/null differ diff --git a/static/images/docs/project-administration/container-limit-ranges/default-limit-range.png b/static/images/docs/project-administration/container-limit-ranges/default-limit-range.png deleted file mode 100644 index 2b26583d1..000000000 Binary files a/static/images/docs/project-administration/container-limit-ranges/default-limit-range.png and /dev/null differ diff --git a/static/images/docs/project-administration/container-limit-ranges/view-limit-ranges.png b/static/images/docs/project-administration/container-limit-ranges/view-limit-ranges.png deleted file mode 100644 index 4b4cc6204..000000000 Binary files a/static/images/docs/project-administration/container-limit-ranges/view-limit-ranges.png and /dev/null differ diff --git a/static/images/docs/project-administration/container-limit-ranges/workload-values.png b/static/images/docs/project-administration/container-limit-ranges/workload-values.png deleted file mode 100644 index 0fd722fc6..000000000 Binary files a/static/images/docs/project-administration/container-limit-ranges/workload-values.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/alpine-image.png b/static/images/docs/project-administration/disk-log-collection/alpine-image.png deleted file mode 100644 index f95679ad3..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/alpine-image.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/container-log.png b/static/images/docs/project-administration/disk-log-collection/container-log.png deleted file mode 100644 index e860b0699..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/container-log.png and /dev/null differ diff --git 
a/static/images/docs/project-administration/disk-log-collection/enable-disk-log-collection.png b/static/images/docs/project-administration/disk-log-collection/enable-disk-log-collection.png deleted file mode 100644 index 71dd671dc..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/enable-disk-log-collection.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/fuzzy-match.png b/static/images/docs/project-administration/disk-log-collection/fuzzy-match.png deleted file mode 100644 index 15d2a1022..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/fuzzy-match.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/log-toggle-switch.png b/static/images/docs/project-administration/disk-log-collection/log-toggle-switch.png index 3cca18446..3df5fbb17 100644 Binary files a/static/images/docs/project-administration/disk-log-collection/log-toggle-switch.png and b/static/images/docs/project-administration/disk-log-collection/log-toggle-switch.png differ diff --git a/static/images/docs/project-administration/disk-log-collection/mount-volumes.png b/static/images/docs/project-administration/disk-log-collection/mount-volumes.png deleted file mode 100644 index fb06622c9..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/mount-volumes.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/run-command.png b/static/images/docs/project-administration/disk-log-collection/run-command.png deleted file mode 100644 index eec8c4228..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/run-command.png and /dev/null differ diff --git a/static/images/docs/project-administration/disk-log-collection/volume-example.png b/static/images/docs/project-administration/disk-log-collection/volume-example.png deleted file mode 
100644 index 47524586f..000000000 Binary files a/static/images/docs/project-administration/disk-log-collection/volume-example.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png b/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png deleted file mode 100644 index be9cffbb8..000000000 Binary files a/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-list.png b/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-list.png deleted file mode 100644 index 45970ac60..000000000 Binary files a/static/images/docs/project-administration/project-and-multicluster-project/multi-cluster-list.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-and-multicluster-project/project-basic-information.png b/static/images/docs/project-administration/project-and-multicluster-project/project-basic-information.png deleted file mode 100644 index a47723cc6..000000000 Binary files a/static/images/docs/project-administration/project-and-multicluster-project/project-basic-information.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-and-multicluster-project/project-list.png b/static/images/docs/project-administration/project-and-multicluster-project/project-list.png deleted file mode 100644 index bf5c5617f..000000000 Binary files a/static/images/docs/project-administration/project-and-multicluster-project/project-list.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-gateway/access-method.png b/static/images/docs/project-administration/project-gateway/access-method.png deleted file mode 100644 index 24b58ff51..000000000 
Binary files a/static/images/docs/project-administration/project-gateway/access-method.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-gateway/lb.png b/static/images/docs/project-administration/project-gateway/lb.png deleted file mode 100644 index 243731090..000000000 Binary files a/static/images/docs/project-administration/project-gateway/lb.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-gateway/nodeport.jpg b/static/images/docs/project-administration/project-gateway/nodeport.jpg deleted file mode 100644 index f3e1bf564..000000000 Binary files a/static/images/docs/project-administration/project-gateway/nodeport.jpg and /dev/null differ diff --git a/static/images/docs/project-administration/project-gateway/set-project-gateway.jpg b/static/images/docs/project-administration/project-gateway/set-project-gateway.jpg deleted file mode 100644 index aab041a53..000000000 Binary files a/static/images/docs/project-administration/project-gateway/set-project-gateway.jpg and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/egress-CIDR-added.png b/static/images/docs/project-administration/project-network-isolation/egress-CIDR-added.png deleted file mode 100644 index fbd923fdd..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/egress-CIDR-added.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/egress-CIDR.png b/static/images/docs/project-administration/project-network-isolation/egress-CIDR.png deleted file mode 100644 index 824983ae3..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/egress-CIDR.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/egress-rule-added.png b/static/images/docs/project-administration/project-network-isolation/egress-rule-added.png 
deleted file mode 100644 index 23d1f2845..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/egress-rule-added.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/engress-rule.png b/static/images/docs/project-administration/project-network-isolation/engress-rule.png deleted file mode 100644 index 72b4eb4eb..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/engress-rule.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/ingress-CIDR.png b/static/images/docs/project-administration/project-network-isolation/ingress-CIDR.png deleted file mode 100644 index 2f81690c0..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/ingress-CIDR.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/ingress-cidr-set.png b/static/images/docs/project-administration/project-network-isolation/ingress-cidr-set.png deleted file mode 100644 index 3a582757e..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/ingress-cidr-set.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/ingress-rule-added.png b/static/images/docs/project-administration/project-network-isolation/ingress-rule-added.png deleted file mode 100644 index c9c587a97..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/ingress-rule-added.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/ingress-rule.png b/static/images/docs/project-administration/project-network-isolation/ingress-rule.png deleted file mode 100644 index 8924efb9c..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/ingress-rule.png and /dev/null differ 
diff --git a/static/images/docs/project-administration/project-network-isolation/isolation-off.png b/static/images/docs/project-administration/project-network-isolation/isolation-off.png deleted file mode 100644 index 4726173ac..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/isolation-off.png and /dev/null differ diff --git a/static/images/docs/project-administration/project-network-isolation/project-network-isolation.png b/static/images/docs/project-administration/project-network-isolation/project-network-isolation.png deleted file mode 100644 index cb3888510..000000000 Binary files a/static/images/docs/project-administration/project-network-isolation/project-network-isolation.png and /dev/null differ diff --git a/static/images/docs/project-administration/role-and-member-management/edit-project-account.png b/static/images/docs/project-administration/role-and-member-management/edit-project-account.png deleted file mode 100644 index 488d80cb0..000000000 Binary files a/static/images/docs/project-administration/role-and-member-management/edit-project-account.png and /dev/null differ diff --git a/static/images/docs/project-administration/role-and-member-management/project-role-details.png b/static/images/docs/project-administration/role-and-member-management/project-role-details.png deleted file mode 100644 index 385786f04..000000000 Binary files a/static/images/docs/project-administration/role-and-member-management/project-role-details.png and /dev/null differ diff --git a/static/images/docs/project-administration/role-and-member-management/project-role-list.png b/static/images/docs/project-administration/role-and-member-management/project-role-list.png deleted file mode 100644 index b2453603f..000000000 Binary files a/static/images/docs/project-administration/role-and-member-management/project-role-list.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/alerting/alerting-messages/alerting-messages.png b/static/images/docs/project-user-guide/alerting/alerting-messages/alerting-messages.png deleted file mode 100644 index 7593b6318..000000000 Binary files a/static/images/docs/project-user-guide/alerting/alerting-messages/alerting-messages.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/alerting/alerting-policies/alert-policy-created.png b/static/images/docs/project-user-guide/alerting/alerting-policies/alert-policy-created.png deleted file mode 100644 index 147fb327c..000000000 Binary files a/static/images/docs/project-user-guide/alerting/alerting-policies/alert-policy-created.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png b/static/images/docs/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png deleted file mode 100644 index 725954671..000000000 Binary files a/static/images/docs/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/alerting/alerting-policies/rule-template.png b/static/images/docs/project-user-guide/alerting/alerting-policies/rule-template.png deleted file mode 100644 index 6b5cdaf25..000000000 Binary files a/static/images/docs/project-user-guide/alerting/alerting-policies/rule-template.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/add-image.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/add-image.png deleted file mode 100644 index 7dd2f9e86..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/add-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/container-security-context.png 
b/static/images/docs/project-user-guide/application-workloads/container-image-settings/container-security-context.png deleted file mode 100644 index 041127922..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/container-security-context.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/cube-icon.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/cube-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/env-variables.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/env-variables.png deleted file mode 100644 index f70cddc95..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/env-variables.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/exec-check.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/exec-check.png deleted file mode 100644 index d7b4d077e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/exec-check.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/health-checker.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/health-checker.png deleted file mode 100644 index 37fd9b13f..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/health-checker.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/http-check.png 
b/static/images/docs/project-user-guide/application-workloads/container-image-settings/http-check.png deleted file mode 100644 index d51474f1e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/http-check.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/image-policy.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/image-policy.png deleted file mode 100644 index ff648e89c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/image-policy.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/minus-icon.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/plus-icon.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/plus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/pod-replicas.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/pod-replicas.png deleted file mode 100644 index 649e52689..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/pod-replicas.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/requests-limits.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/requests-limits.png deleted file mode 100644 index 499053dfc..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/requests-limits.png 
and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/start-command.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/start-command.png deleted file mode 100644 index d4873a868..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/start-command.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/container-image-settings/tcp-port.png b/static/images/docs/project-user-guide/application-workloads/container-image-settings/tcp-port.png deleted file mode 100644 index 8eaac5f2c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/container-image-settings/tcp-port.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/basic-info.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/basic-info.png deleted file mode 100644 index 3ad0cb535..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/basic-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/click-create.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/click-create.png deleted file mode 100644 index eb802cee9..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/click-create.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/container-log-icon.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/container-log-icon.png new file mode 100644 index 000000000..3f8ff8891 Binary files /dev/null and b/static/images/docs/project-user-guide/application-workloads/cronjobs/container-log-icon.png differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/cronjobs/cronjob.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/cronjob.png deleted file mode 100644 index e8fb553cb..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/cronjob.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/down-arrow.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/events.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/events.png deleted file mode 100644 index c484870c1..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/exe-records.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/exe-records.png deleted file mode 100644 index 97aeb718c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/exe-records.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/image-set.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/image-set.png deleted file mode 100644 index d754c8d11..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/image-set.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/job-detail.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/job-detail.png deleted file mode 100644 index 1a18e88e4..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/job-detail.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/cronjobs/job-records.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/job-records.png deleted file mode 100644 index 0d49e55b9..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/job-records.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/jobs.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/jobs.png deleted file mode 100644 index 790d146db..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/jobs.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/log-detail.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/log-detail.png deleted file mode 100644 index d1d931232..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/log-detail.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/metadata.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/metadata.png deleted file mode 100644 index 7cb26e226..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/modify.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/modify.png deleted file mode 100644 index 37277e426..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/modify.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/set-image.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/set-image.png deleted file mode 100644 index 7e4525bc1..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/cronjobs/set-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/start-command.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/start-command.png deleted file mode 100644 index 5de7f0caa..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/start-command.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/cronjobs/view-log.png b/static/images/docs/project-user-guide/application-workloads/cronjobs/view-log.png deleted file mode 100644 index 782a43929..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/cronjobs/view-log.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/advanced-settings.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/advanced-settings.png deleted file mode 100644 index 6ade8dd3b..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/basic-info.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/basic-info.png deleted file mode 100644 index 00a6b2d99..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/basic-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-add-image.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/click-add-image.png deleted file mode 100644 index 3a51c70af..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-add-image.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-create.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/click-create.png deleted file mode 100644 index 67f455637..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-create.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-dots.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/click-dots.png deleted file mode 100644 index db5f849a0..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-dots.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-more.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/click-more.png deleted file mode 100644 index a5cc968a5..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-more.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-time-interval.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/click-time-interval.png deleted file mode 100644 index f232855ba..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/click-time-interval.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/detail-page.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/detail-page.png deleted file mode 100644 index 06158486a..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/detail-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/enter-image.png 
b/static/images/docs/project-user-guide/application-workloads/daemonsets/enter-image.png deleted file mode 100644 index 279ec363f..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/enter-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/env-variables.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/env-variables.png deleted file mode 100644 index 139c8b805..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/env-variables.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/events.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/events.png deleted file mode 100644 index 49e18de37..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/metadata.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/metadata.png deleted file mode 100644 index b976f4802..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/monitoring.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/monitoring.png deleted file mode 100644 index f583ae720..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/monitoring.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/mount-volumes.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/mount-volumes.png deleted file mode 100644 index a0263e68a..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/daemonsets/mount-volumes.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/pod-detail.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/pod-detail.png deleted file mode 100644 index 566ce3437..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/pod-detail.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/refresh.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/resource-status.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/resource-status.png deleted file mode 100644 index 82c41f1fc..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/resource-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/set-requests-limits.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/set-requests-limits.png deleted file mode 100644 index c7521202f..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/daemonsets/set-requests-limits.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/start-refresh.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/start-refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/stop-refresh.png b/static/images/docs/project-user-guide/application-workloads/daemonsets/stop-refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/daemonsets/three-dots.png 
b/static/images/docs/project-user-guide/application-workloads/daemonsets/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/add-container-image.png b/static/images/docs/project-user-guide/application-workloads/deployments/add-container-image.png deleted file mode 100644 index 5410a8eef..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/add-container-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/advanced-settings.png b/static/images/docs/project-user-guide/application-workloads/deployments/advanced-settings.png deleted file mode 100644 index 0efffcf3d..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/click-more.png b/static/images/docs/project-user-guide/application-workloads/deployments/click-more.png deleted file mode 100644 index 2deb6eb35..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/click-more.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/click-to-modify.png b/static/images/docs/project-user-guide/application-workloads/deployments/click-to-modify.png deleted file mode 100644 index 6bb0d4ee5..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/click-to-modify.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/deploy-details.png b/static/images/docs/project-user-guide/application-workloads/deployments/deploy-details.png deleted file mode 100644 index 377164890..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/deploy-details.png and 
/dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/deployments.png b/static/images/docs/project-user-guide/application-workloads/deployments/deployments.png deleted file mode 100644 index daa4a166d..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/deployments.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/deployments_autorefresh_start.png b/static/images/docs/project-user-guide/application-workloads/deployments/deployments_autorefresh_start.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/deployments_autorefresh_stop.png b/static/images/docs/project-user-guide/application-workloads/deployments/deployments_autorefresh_stop.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/deployments_refresh.png b/static/images/docs/project-user-guide/application-workloads/deployments/deployments_refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/down-arrow.png b/static/images/docs/project-user-guide/application-workloads/deployments/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/enter-info.png b/static/images/docs/project-user-guide/application-workloads/deployments/enter-info.png deleted file mode 100644 index 0cdf8c2b3..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/enter-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/enter-nginx.png b/static/images/docs/project-user-guide/application-workloads/deployments/enter-nginx.png deleted file mode 100644 index e39a75a21..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/deployments/enter-nginx.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/env-variables.png b/static/images/docs/project-user-guide/application-workloads/deployments/env-variables.png deleted file mode 100644 index 851d9db8e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/env-variables.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/events.png b/static/images/docs/project-user-guide/application-workloads/deployments/events.png deleted file mode 100644 index baa67df60..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/meta-data.png b/static/images/docs/project-user-guide/application-workloads/deployments/meta-data.png deleted file mode 100644 index 19736fdfe..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/meta-data.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/minus-icon.png b/static/images/docs/project-user-guide/application-workloads/deployments/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/monitoring.png b/static/images/docs/project-user-guide/application-workloads/deployments/monitoring.png deleted file mode 100644 index 8a5649925..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/monitoring.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/mount-volumes.png b/static/images/docs/project-user-guide/application-workloads/deployments/mount-volumes.png deleted file mode 
100644 index 376447bc8..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/mount-volumes.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/plus-icon.png b/static/images/docs/project-user-guide/application-workloads/deployments/plus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/pods.png b/static/images/docs/project-user-guide/application-workloads/deployments/pods.png deleted file mode 100644 index bc93393f2..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/pods.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/resource-status.png b/static/images/docs/project-user-guide/application-workloads/deployments/resource-status.png deleted file mode 100644 index 7e59fb2c9..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/resource-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/set-replicas.png b/static/images/docs/project-user-guide/application-workloads/deployments/set-replicas.png deleted file mode 100644 index bcad03aa2..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/set-replicas.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/set-requests-limits.png b/static/images/docs/project-user-guide/application-workloads/deployments/set-requests-limits.png deleted file mode 100644 index c660dcd99..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/set-requests-limits.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/three-dots.png 
b/static/images/docs/project-user-guide/application-workloads/deployments/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/time-interval.png b/static/images/docs/project-user-guide/application-workloads/deployments/time-interval.png deleted file mode 100644 index 146c03cd7..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/deployments/time-interval.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/deployments/up-arrow.png b/static/images/docs/project-user-guide/application-workloads/deployments/up-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png deleted file mode 100644 index ca726c84f..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png deleted file mode 100644 index ed529e108..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png deleted file mode 100644 index 3078b46ca..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png deleted file mode 100644 index a43d546f3..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png deleted file mode 100644 index e1b4b20fc..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png deleted file mode 100644 index 1db95af0b..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png deleted file mode 100644 index 6e70af591..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png deleted file mode 100644 index 243e221a0..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png deleted file mode 100644 index 755f87bc6..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png deleted file mode 100644 index 4401618ed..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png deleted file mode 100644 index 3b735d729..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png deleted file mode 100644 index 4d119a730..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png 
b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png deleted file mode 100644 index 5bccf662e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png deleted file mode 100644 index 998c66293..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png deleted file mode 100644 index 26f03b5ca..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png deleted file mode 100644 index e277b8006..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/three-dots.png b/static/images/docs/project-user-guide/application-workloads/horizontal-pod-autoscaling/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/basic-info.png 
b/static/images/docs/project-user-guide/application-workloads/jobs/basic-info.png deleted file mode 100644 index af84dbe85..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/basic-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/click-create.png b/static/images/docs/project-user-guide/application-workloads/jobs/click-create.png deleted file mode 100644 index a845cf3bc..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/click-create.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/container-log-icon.png b/static/images/docs/project-user-guide/application-workloads/jobs/container-log-icon.png new file mode 100644 index 000000000..3f8ff8891 Binary files /dev/null and b/static/images/docs/project-user-guide/application-workloads/jobs/container-log-icon.png differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/display.png b/static/images/docs/project-user-guide/application-workloads/jobs/display.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/down-arrow.png b/static/images/docs/project-user-guide/application-workloads/jobs/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/env-variables.png b/static/images/docs/project-user-guide/application-workloads/jobs/env-variables.png deleted file mode 100644 index 6020cc504..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/env-variables.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/events.png b/static/images/docs/project-user-guide/application-workloads/jobs/events.png deleted file mode 100644 index 74dae55b6..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/jobs/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/exe-records.png b/static/images/docs/project-user-guide/application-workloads/jobs/exe-records.png deleted file mode 100644 index f90306c0b..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/exe-records.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/execution-records.png b/static/images/docs/project-user-guide/application-workloads/jobs/execution-records.png deleted file mode 100644 index 6db0d5414..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/execution-records.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/hide.png b/static/images/docs/project-user-guide/application-workloads/jobs/hide.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/job-in-list.png b/static/images/docs/project-user-guide/application-workloads/jobs/job-in-list.png deleted file mode 100644 index f97a020a8..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/job-in-list.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/job-settings.png b/static/images/docs/project-user-guide/application-workloads/jobs/job-settings.png deleted file mode 100644 index e8f83dc92..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/job-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/log.png b/static/images/docs/project-user-guide/application-workloads/jobs/log.png deleted file mode 100644 index 756f8422b..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/log.png 
and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/metadata.png b/static/images/docs/project-user-guide/application-workloads/jobs/metadata.png deleted file mode 100644 index 4f621af69..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/modify-job.png b/static/images/docs/project-user-guide/application-workloads/jobs/modify-job.png deleted file mode 100644 index abcafab2d..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/modify-job.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/refresh.png b/static/images/docs/project-user-guide/application-workloads/jobs/refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/res-status.png b/static/images/docs/project-user-guide/application-workloads/jobs/res-status.png deleted file mode 100644 index 0d8d3c233..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/res-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/resource-status.png b/static/images/docs/project-user-guide/application-workloads/jobs/resource-status.png deleted file mode 100644 index cb8621916..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/resource-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/restart-policy.png b/static/images/docs/project-user-guide/application-workloads/jobs/restart-policy.png deleted file mode 100644 index 52f5dfa94..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/restart-policy.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/jobs/set-image.png b/static/images/docs/project-user-guide/application-workloads/jobs/set-image.png deleted file mode 100644 index 3f2ac60c5..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/set-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/jobs/start-command.png b/static/images/docs/project-user-guide/application-workloads/jobs/start-command.png deleted file mode 100644 index d13d03246..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/jobs/start-command.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/access-method-loadbalancer.png b/static/images/docs/project-user-guide/application-workloads/routes/access-method-loadbalancer.png deleted file mode 100644 index a323c7cce..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/access-method-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/access-method-nodeport.png b/static/images/docs/project-user-guide/application-workloads/routes/access-method-nodeport.png deleted file mode 100644 index 139f81326..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/access-method-nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/access-route-loadbalancer.png b/static/images/docs/project-user-guide/application-workloads/routes/access-route-loadbalancer.png deleted file mode 100644 index 362ca43b3..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/access-route-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/access-route-nodeport.png 
b/static/images/docs/project-user-guide/application-workloads/routes/access-route-nodeport.png deleted file mode 100644 index 5dded04b1..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/access-route-nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/add-metadata.png b/static/images/docs/project-user-guide/application-workloads/routes/add-metadata.png deleted file mode 100644 index 5048264b1..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/add-metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/auto-generate.png b/static/images/docs/project-user-guide/application-workloads/routes/auto-generate.png deleted file mode 100644 index 5e98286b5..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/auto-generate.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/basic-info.png b/static/images/docs/project-user-guide/application-workloads/routes/basic-info.png deleted file mode 100644 index 1ef9fed64..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/basic-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/create-route.png b/static/images/docs/project-user-guide/application-workloads/routes/create-route.png deleted file mode 100644 index ce75f4947..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/create-route.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/edit-route.png b/static/images/docs/project-user-guide/application-workloads/routes/edit-route.png deleted file mode 100644 index 13d7db7bc..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/routes/edit-route.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/events.png b/static/images/docs/project-user-guide/application-workloads/routes/events.png deleted file mode 100644 index cdc2ccd93..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/metadata.png b/static/images/docs/project-user-guide/application-workloads/routes/metadata.png deleted file mode 100644 index 01979d510..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png b/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png deleted file mode 100644 index 81461730c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-nodeport.png b/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-nodeport.png deleted file mode 100644 index 3b5a66033..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/obtain-address-nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/resource-status.png b/static/images/docs/project-user-guide/application-workloads/routes/resource-status.png deleted file mode 100644 index 731d82c66..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/resource-status.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/routes/route-list.png b/static/images/docs/project-user-guide/application-workloads/routes/route-list.png deleted file mode 100644 index 50f036fe1..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/route-list.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/set-gateway.png b/static/images/docs/project-user-guide/application-workloads/routes/set-gateway.png deleted file mode 100644 index 5f10cddd2..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/set-gateway.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/routes/specify-domain.png b/static/images/docs/project-user-guide/application-workloads/routes/specify-domain.png deleted file mode 100644 index 7f34d600f..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/routes/specify-domain.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/advanced-settings.png b/static/images/docs/project-user-guide/application-workloads/services/advanced-settings.png deleted file mode 100644 index 2412eab5c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/click-create.png b/static/images/docs/project-user-guide/application-workloads/services/click-create.png deleted file mode 100644 index 729037d49..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/click-create.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/click-dots.png b/static/images/docs/project-user-guide/application-workloads/services/click-dots.png deleted file mode 100644 
index 68391626d..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/click-dots.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/detail-page.png b/static/images/docs/project-user-guide/application-workloads/services/detail-page.png deleted file mode 100644 index fa1a58b1c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/detail-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/display.png b/static/images/docs/project-user-guide/application-workloads/services/display.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/services/enter-name.png b/static/images/docs/project-user-guide/application-workloads/services/enter-name.png deleted file mode 100644 index 17244952b..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/enter-name.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/events.png b/static/images/docs/project-user-guide/application-workloads/services/events.png deleted file mode 100644 index 39d090604..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/hide.png b/static/images/docs/project-user-guide/application-workloads/services/hide.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/services/metadata.png b/static/images/docs/project-user-guide/application-workloads/services/metadata.png deleted file mode 100644 index fcee5bd02..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/metadata.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/services/refresh.png b/static/images/docs/project-user-guide/application-workloads/services/refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/services/resource-status.png b/static/images/docs/project-user-guide/application-workloads/services/resource-status.png deleted file mode 100644 index bfeab4c46..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/resource-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/see-pod.png b/static/images/docs/project-user-guide/application-workloads/services/see-pod.png deleted file mode 100644 index d94fbc273..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/see-pod.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/service-types.png b/static/images/docs/project-user-guide/application-workloads/services/service-types.png deleted file mode 100644 index 69b4034e9..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/service-types.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/set-image.png b/static/images/docs/project-user-guide/application-workloads/services/set-image.png deleted file mode 100644 index ae8cb4fc5..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/set-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/set-volumes.png b/static/images/docs/project-user-guide/application-workloads/services/set-volumes.png deleted file mode 100644 index 9bf6f1b18..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/set-volumes.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/services/stateless-service.png b/static/images/docs/project-user-guide/application-workloads/services/stateless-service.png deleted file mode 100644 index ef8b3b7c2..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/services/stateless-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/services/three-dots.png b/static/images/docs/project-user-guide/application-workloads/services/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/advanced-settings.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/advanced-settings.png deleted file mode 100644 index 7ebb8d753..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-add-image.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/click-add-image.png deleted file mode 100644 index ad0e99e27..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-add-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-create.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/click-create.png deleted file mode 100644 index d18bbe93e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-create.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-dots.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/click-dots.png deleted file mode 100644 index 71e9fc7cb..000000000 Binary files 
a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-dots.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-more.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/click-more.png deleted file mode 100644 index eb9e6f314..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/click-more.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/detail-page.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/detail-page.png deleted file mode 100644 index f86963f60..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/detail-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/down-arrow.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-name.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-name.png deleted file mode 100644 index e500581ce..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-name.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-nginx.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-nginx.png deleted file mode 100644 index bbc3d6fb7..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/enter-nginx.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/env-variables.png 
b/static/images/docs/project-user-guide/application-workloads/statefulsets/env-variables.png deleted file mode 100644 index 96f0adb39..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/env-variables.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/events.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/events.png deleted file mode 100644 index 99f4b121c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/events.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/metadata.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/metadata.png deleted file mode 100644 index 7180877b7..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/metadata.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/minus-icon.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/monitoring.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/monitoring.png deleted file mode 100644 index 1e95f73d9..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/monitoring.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/mount-volume.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/mount-volume.png deleted file mode 100644 index acdc0c93e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/mount-volume.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/statefulsets/plus-icon.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/plus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/pod-detail.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/pod-detail.png deleted file mode 100644 index 8ea6d4f17..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/pod-detail.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/refresh.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/resource-status.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/resource-status.png deleted file mode 100644 index 1b1e2d37c..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/resource-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-interval.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/set-interval.png deleted file mode 100644 index f11b9624a..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-interval.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-replicas.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/set-replicas.png deleted file mode 100644 index 82044c76a..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-replicas.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-requests-limits.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/set-requests-limits.png deleted file mode 100644 index 1c00ac91e..000000000 Binary files a/static/images/docs/project-user-guide/application-workloads/statefulsets/set-requests-limits.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/start-refresh.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/start-refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/stop-refresh.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/stop-refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/three-dots.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/application-workloads/statefulsets/up-arrow.png b/static/images/docs/project-user-guide/application-workloads/statefulsets/up-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/project-user-guide/applications/app-templates/app-store.png b/static/images/docs/project-user-guide/applications/app-templates/app-store.png deleted file mode 100644 index cb242b8cb..000000000 Binary files a/static/images/docs/project-user-guide/applications/app-templates/app-store.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/app-templates/private-app-repository.png b/static/images/docs/project-user-guide/applications/app-templates/private-app-repository.png deleted file mode 100644 index 4d677f3b8..000000000 Binary files a/static/images/docs/project-user-guide/applications/app-templates/private-app-repository.png and /dev/null differ 
diff --git a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png b/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png deleted file mode 100644 index c89c1f48b..000000000 Binary files a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/dashboard.png b/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/dashboard.png deleted file mode 100644 index 3c87be1e5..000000000 Binary files a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/dashboard.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/review-page.png b/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/review-page.png deleted file mode 100644 index d1aa00aa4..000000000 Binary files a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/review-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/route.png b/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/route.png deleted file mode 100644 index 8d70ef200..000000000 Binary files a/static/images/docs/project-user-guide/applications/create-a-microservices-based-app/route.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png deleted file mode 100644 index 1b23e833c..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png deleted file mode 100644 index 626c2f183..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png deleted file mode 100644 index 91caa2a4e..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png deleted file mode 100644 index 415e95758..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png deleted file mode 100644 index 530d74674..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png deleted file mode 100644 index 17c661acd..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png deleted file mode 100644 index 2e78e56e9..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png deleted file mode 100644 index 3a394bef8..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png deleted file mode 100644 index 12d5ec460..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png deleted file mode 100644 index 62a80d2b5..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png deleted file mode 100644 index c74fa1abd..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png deleted file mode 100644 index dd2ec32ac..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png deleted file mode 100644 index b5a3282d1..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png deleted file mode 100644 index 3d0e2ba94..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png deleted file mode 100644 index 0d9d2f826..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png deleted file mode 100644 index 2e7feaa69..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png and /dev/null differ diff 
--git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png deleted file mode 100644 index 6e8c5f96b..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png deleted file mode 100644 index 27d6f41a2..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png deleted file mode 100644 index 57004b314..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png deleted file mode 100644 index da3c9c1e0..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png deleted file mode 100644 index 0983df953..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png and /dev/null differ 
diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png deleted file mode 100644 index 00655e485..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png deleted file mode 100644 index ba62277de..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png deleted file mode 100644 index 4468a6c10..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png deleted file mode 100644 index 24b4dbfbf..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png deleted file mode 100644 index 01b157b76..000000000 Binary files 
a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png deleted file mode 100644 index 909418853..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png deleted file mode 100644 index b48652d85..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png deleted file mode 100644 index 7e038a66f..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png b/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png deleted file mode 100644 index 037efce49..000000000 Binary files a/static/images/docs/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/configmaps/detail-page.png b/static/images/docs/project-user-guide/configurations/configmaps/detail-page.png deleted file mode 100644 index 0f9b82ece..000000000 Binary 
files a/static/images/docs/project-user-guide/configurations/configmaps/detail-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/configmaps/key-value.png b/static/images/docs/project-user-guide/configurations/configmaps/key-value.png deleted file mode 100644 index bcf489c41..000000000 Binary files a/static/images/docs/project-user-guide/configurations/configmaps/key-value.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/configmaps/use-configmap.jpg b/static/images/docs/project-user-guide/configurations/configmaps/use-configmap.jpg deleted file mode 100644 index 5c5c5698e..000000000 Binary files a/static/images/docs/project-user-guide/configurations/configmaps/use-configmap.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/create-secret.png b/static/images/docs/project-user-guide/configurations/image-registries/create-secret.png deleted file mode 100644 index 55b4afeae..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/create-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/harbor-address.png b/static/images/docs/project-user-guide/configurations/image-registries/harbor-address.png deleted file mode 100644 index d9e564c95..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/harbor-address.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/image-registry-info.png b/static/images/docs/project-user-guide/configurations/image-registries/image-registry-info.png deleted file mode 100644 index e3dd0c016..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/image-registry-info.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/configurations/image-registries/open-dashboard.png b/static/images/docs/project-user-guide/configurations/image-registries/open-dashboard.png deleted file mode 100644 index a90dcb386..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/open-dashboard.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/qingcloud-registry.jpg b/static/images/docs/project-user-guide/configurations/image-registries/qingcloud-registry.jpg deleted file mode 100644 index 419b21461..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/qingcloud-registry.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/use-image-registry.png b/static/images/docs/project-user-guide/configurations/image-registries/use-image-registry.png deleted file mode 100644 index b49394be8..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/use-image-registry.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/image-registries/validate-registry-address.png b/static/images/docs/project-user-guide/configurations/image-registries/validate-registry-address.png deleted file mode 100644 index 8f3e97d3a..000000000 Binary files a/static/images/docs/project-user-guide/configurations/image-registries/validate-registry-address.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/account-password-secret.png b/static/images/docs/project-user-guide/configurations/secrets/account-password-secret.png deleted file mode 100644 index 35a36079c..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/account-password-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/add-data.jpg 
b/static/images/docs/project-user-guide/configurations/secrets/add-data.jpg deleted file mode 100644 index e10aac06f..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/add-data.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/click-more.png b/static/images/docs/project-user-guide/configurations/secrets/click-more.png deleted file mode 100644 index f2b5af0eb..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/click-more.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/create-secrets.png b/static/images/docs/project-user-guide/configurations/secrets/create-secrets.png deleted file mode 100644 index 349136f4d..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/create-secrets.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/default-secret.png b/static/images/docs/project-user-guide/configurations/secrets/default-secret.png deleted file mode 100644 index f2857b962..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/default-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/docker-hub-secret.png b/static/images/docs/project-user-guide/configurations/secrets/docker-hub-secret.png deleted file mode 100644 index a96fded74..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/docker-hub-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/enter-key.png b/static/images/docs/project-user-guide/configurations/secrets/enter-key.png deleted file mode 100644 index d5d73a70c..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/enter-key.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/configurations/secrets/github-secret.png b/static/images/docs/project-user-guide/configurations/secrets/github-secret.png deleted file mode 100644 index b3841ddc4..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/github-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/image-registry-secret.png b/static/images/docs/project-user-guide/configurations/secrets/image-registry-secret.png deleted file mode 100644 index 0b8bb7d7a..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/image-registry-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/secret-detail-page.png b/static/images/docs/project-user-guide/configurations/secrets/secret-detail-page.png deleted file mode 100644 index 0626981ca..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/secret-detail-page.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/secret-list.png b/static/images/docs/project-user-guide/configurations/secrets/secret-list.png deleted file mode 100644 index c9038e180..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/secret-list.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/secret-type.png b/static/images/docs/project-user-guide/configurations/secrets/secret-type.png deleted file mode 100644 index f5d99d297..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/secret-type.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/set-secret.png b/static/images/docs/project-user-guide/configurations/secrets/set-secret.png deleted file mode 100644 index ae13f9a83..000000000 Binary files 
a/static/images/docs/project-user-guide/configurations/secrets/set-secret.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/tls.png b/static/images/docs/project-user-guide/configurations/secrets/tls.png deleted file mode 100644 index 287f34bd1..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/tls.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/use-secret-image.png b/static/images/docs/project-user-guide/configurations/secrets/use-secret-image.png deleted file mode 100644 index 8d12ccb28..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/use-secret-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/configurations/secrets/use-secret-repository.png b/static/images/docs/project-user-guide/configurations/secrets/use-secret-repository.png deleted file mode 100644 index a0ed32b16..000000000 Binary files a/static/images/docs/project-user-guide/configurations/secrets/use-secret-repository.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/app-template-create.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/app-template-create.jpg deleted file mode 100644 index 157c15792..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/app-template-create.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/click-create-app-template.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-create-app-template.jpg deleted file mode 100644 index 2d6163790..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-create-app-template.jpg and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-2.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-2.jpg deleted file mode 100644 index 8a6ce3b7f..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-2.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-4.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-4.jpg deleted file mode 100644 index d5e53b5c8..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-4.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-5.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-5.jpg deleted file mode 100644 index bc9e188c9..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-5.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-6.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-6.jpg deleted file mode 100644 index 062b73044..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-6.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template.jpg deleted file mode 100644 index 90a66b3c2..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template.jpg and /dev/null differ 
diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-1.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-1.jpg deleted file mode 100644 index 3469da867..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-1.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-10.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-10.jpg deleted file mode 100644 index 2fcf39f55..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-10.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-2.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-2.jpg deleted file mode 100644 index a15d3402e..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-2.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-3.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-3.jpg deleted file mode 100644 index 8292e2fd7..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-3.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-4.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-4.jpg deleted file mode 100644 index caff22b82..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-4.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-5.jpg 
b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-5.jpg deleted file mode 100644 index 08dea47c4..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-5.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-6.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-6.jpg deleted file mode 100644 index 209f35587..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-6.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-7.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-7.jpg deleted file mode 100644 index 2ec537c83..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-7.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-8.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-8.jpg deleted file mode 100644 index 1a7da41ee..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-8.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-9.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-9.jpg deleted file mode 100644 index 200102b0a..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-9.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-1.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-1.jpg deleted file mode 100644 
index 4a8fa3dd2..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-1.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-2.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-2.jpg deleted file mode 100644 index 7c4457dac..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-2.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-3.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-3.jpg deleted file mode 100644 index 86c96ac98..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-3.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-4.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-4.jpg deleted file mode 100644 index 2b9d94f9a..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-4.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-5.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-5.jpg deleted file mode 100644 index 151a1a486..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-5.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-6.jpg b/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-6.jpg deleted file mode 100644 index d947ccba9..000000000 Binary files 
a/static/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-6.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-app-configurations.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-app-configurations.png deleted file mode 100644 index 7372c199b..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-app-configurations.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-dashboards.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-dashboards.png deleted file mode 100644 index f03ed7359..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-dashboards.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png deleted file mode 100644 index 5bf4a88ee..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png deleted file mode 100644 index dc0c4cbba..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png deleted file mode 100644 index 56eb750ef..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-app/plus-icon.png b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-app/plus-icon.png new file mode 100644 index 000000000..43f67656e Binary files /dev/null and b/static/images/docs/project-user-guide/custom-application-monitoring/examples/monitor-sample-app/plus-icon.png differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png deleted file mode 100644 index 4047c36d0..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png deleted file mode 100644 index 03bcbe157..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/add-charts.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/add-charts.png deleted file mode 100644 index 9bbe6db9d..000000000 Binary files 
a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/add-charts.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png deleted file mode 100644 index 9adac97f9..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/detail-column.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/detail-column.png deleted file mode 100644 index 4cc3f1fd0..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/detail-column.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-mode.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-mode.png deleted file mode 100644 index 689a705be..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-mode.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-settings.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-settings.png deleted file mode 100644 index 466a72d77..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/edit-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png 
b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png deleted file mode 100644 index 9e0420e17..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/text-charts.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/text-charts.png deleted file mode 100644 index 49ba9dd26..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/text-charts.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/view-charts.png b/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/view-charts.png deleted file mode 100644 index 639933cc4..000000000 Binary files a/static/images/docs/project-user-guide/custom-application-monitoring/visualization/overview/view-charts.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.png deleted file mode 100644 index ec252549d..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.png deleted file mode 100644 index 5889a2f61..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.png deleted file mode 100644 index 03b1b8e88..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.png deleted file mode 100644 index 70e2df48e..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.png deleted file mode 100644 index 8a061ee3b..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.png deleted file mode 100644 index aa8006cac..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.png deleted file mode 100644 index 7f922d317..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.png deleted file mode 100644 index 415293857..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.png deleted file mode 100644 index 28d516fb5..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.png deleted file mode 100644 index c3c340d16..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/finish-canary-release.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/finish-canary-release.png deleted file mode 100644 index 99dcbc1b7..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/finish-canary-release.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-release.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-release.png deleted file mode 100644 index 778e0d7c0..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-release.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/grayscale-release/canary-release/topology.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/topology.png deleted file mode 100644 index 6c1eb1163..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/topology.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png deleted file mode 100644 index dcd196d9d..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing.png deleted file mode 100644 index e42910f2d..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/tracing.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic-management.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic-management.png deleted file mode 100644 index c95cd4da3..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic-management.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic.png deleted file mode 100644 index 74c9f5789..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/canary-release/traffic.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.png 
b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.png deleted file mode 100644 index 9a501a8f7..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.png b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.png deleted file mode 100644 index 10a99ed48..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.png b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.png deleted file mode 100644 index 76c7ec025..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.png b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.png deleted file mode 100644 index fd22ceb01..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.png b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.png deleted file mode 100644 index a582859d5..000000000 Binary files a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/access-service.jpg b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/access-service.jpg deleted file mode 100644 index 71b475a0d..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/access-service.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/advanced-settings.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/advanced-settings.png deleted file mode 100644 index bf0aa5e05..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/buidling-settings-2.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/buidling-settings-2.png deleted file mode 100644 index 330b73dc5..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/buidling-settings-2.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-settings.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-settings.png deleted file mode 100644 index 937e1ff1d..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building-status.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building-status.png deleted file mode 100644 index 3d81509dc..000000000 Binary files 
a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building-status.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building.png deleted file mode 100644 index b77a181c5..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/container-settings.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/container-settings.png deleted file mode 100644 index d9ec24a4a..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/container-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/create-service.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/create-service.png deleted file mode 100644 index 7632cdf77..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/create-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/deployment.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/deployment.png deleted file mode 100644 index 7b3472ad5..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image-pushed.png 
b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image-pushed.png deleted file mode 100644 index 90c92bb03..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image-pushed.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image.png deleted file mode 100644 index 515f65678..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/exposed-port.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/exposed-port.png deleted file mode 100644 index 709f33604..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/exposed-port.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-builder.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-builder.png deleted file mode 100644 index 1a64545a2..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-builder.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-success.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-success.png deleted file mode 100644 index b3a357820..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-success.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-log.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-log.png deleted file mode 100644 index c769514f8..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-log.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-logs.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-logs.png deleted file mode 100644 index da0eb9fd3..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-logs.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job-created.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job-created.png deleted file mode 100644 index ed9aadc90..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job-created.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job.png deleted file mode 100644 index aeecfc408..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service.png deleted file mode 100644 index e83479adf..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service.png and 
/dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/successful.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/successful.png deleted file mode 100644 index 08de86f24..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/successful.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/upload-artifact.png b/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/upload-artifact.png deleted file mode 100644 index 08c807051..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/upload-artifact.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/add-payload-url.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/add-payload-url.png deleted file mode 100644 index 71ddeea0c..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/add-payload-url.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-auto-build.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-auto-build.png deleted file mode 100644 index b3f91bdae..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-auto-build.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-payload-url.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-payload-url.png deleted file mode 100644 index 240967807..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-payload-url.png and /dev/null differ diff 
--git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-trigger-link.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-trigger-link.png deleted file mode 100644 index 6fa67e1fe..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/b2i-trigger-link.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-add-webhook.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-add-webhook.png deleted file mode 100644 index 3abcf4f9a..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-add-webhook.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-s2i.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-s2i.png deleted file mode 100644 index e444dd0cd..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/click-s2i.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/edit-trigger-service.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/edit-trigger-service.png deleted file mode 100644 index 1e0e41b2b..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/edit-trigger-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-auto-build.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-auto-build.png deleted file mode 100644 index 5629a9e5b..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-auto-build.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-nodeport.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-nodeport.png deleted file mode 100644 index a074f48f9..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-link.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-link.png deleted file mode 100644 index acf4dd490..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-link.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-service.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-service.png deleted file mode 100644 index adb6be3b3..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/s2i-trigger-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/select-nodeport.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/select-nodeport.png deleted file mode 100644 index cd2ddf53c..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/select-nodeport.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/webhook-delivery.png b/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/webhook-delivery.png deleted file mode 100644 index 250223bef..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-and-b2i-webhooks/webhook-delivery.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-log.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-log.png deleted file mode 100644 index 335b827de..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-log.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-settings.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-settings.png deleted file mode 100644 index fed8f4297..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-settings.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/building.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/building.png deleted file mode 100644 index d0340f928..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/building.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/copy-repo-code.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/copy-repo-code.png deleted file mode 100644 index 66fd008c4..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/copy-repo-code.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-finish.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-finish.png deleted file mode 100644 index 1d766cb74..000000000 Binary files 
a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-finish.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-service.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-service.png deleted file mode 100644 index 22282786b..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/deployment.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/deployment.png deleted file mode 100644 index 038f1312c..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/deployment.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/docker-image.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/docker-image.png deleted file mode 100644 index 26859563d..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/docker-image.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/fork-repository.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/fork-repository.png deleted file mode 100644 index b2cbe1079..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/fork-repository.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/health-checker.png 
b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/health-checker.png deleted file mode 100644 index a6efce609..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/health-checker.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/job.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/job.png deleted file mode 100644 index 0972ea7fd..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/job.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/select-lang-type.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/select-lang-type.png deleted file mode 100644 index 276398252..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/select-lang-type.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-detail.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-detail.png deleted file mode 100644 index 49b97df86..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-detail.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-settings.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-settings.png deleted file mode 100644 index efef50338..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-settings.png and /dev/null differ diff --git 
a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service.png deleted file mode 100644 index 59c1bd509..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/success-result.png b/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/success-result.png deleted file mode 100644 index 4e1f0a9a0..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/success-result.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-templates/access-nginx.png b/static/images/docs/project-user-guide/image-builder/s2i-templates/access-nginx.png deleted file mode 100644 index ec900371e..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-templates/access-nginx.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/image-builder/s2i-templates/template-available.png b/static/images/docs/project-user-guide/image-builder/s2i-templates/template-available.png deleted file mode 100644 index 2edbc54d8..000000000 Binary files a/static/images/docs/project-user-guide/image-builder/s2i-templates/template-available.png and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg b/static/images/docs/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg deleted file mode 100644 index a026e60e4..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/delete-volume.jpg 
b/static/images/docs/project-user-guide/volume-management/volumes/delete-volume.jpg deleted file mode 100644 index b9fb5f873..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/delete-volume.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/local-pending.jpg b/static/images/docs/project-user-guide/volume-management/volumes/local-pending.jpg deleted file mode 100644 index 7e687c7e1..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/local-pending.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/select-storage-class.jpg b/static/images/docs/project-user-guide/volume-management/volumes/select-storage-class.jpg deleted file mode 100644 index 516df9577..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/select-storage-class.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/volume-detail-page.jpg b/static/images/docs/project-user-guide/volume-management/volumes/volume-detail-page.jpg deleted file mode 100644 index 8775c31b3..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/volume-detail-page.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/volume-monitoring.jpg b/static/images/docs/project-user-guide/volume-management/volumes/volume-monitoring.jpg deleted file mode 100644 index 51737b7e5..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/volume-monitoring.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/volume-page.jpg b/static/images/docs/project-user-guide/volume-management/volumes/volume-page.jpg deleted file mode 100644 index 7cb9e8cbd..000000000 Binary files 
a/static/images/docs/project-user-guide/volume-management/volumes/volume-page.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/volume-status.jpg b/static/images/docs/project-user-guide/volume-management/volumes/volume-status.jpg deleted file mode 100644 index d9e0df214..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/volume-status.jpg and /dev/null differ diff --git a/static/images/docs/project-user-guide/volume-management/volumes/volumebindingmode.jpg b/static/images/docs/project-user-guide/volume-management/volumes/volumebindingmode.jpg deleted file mode 100644 index ffe2335f3..000000000 Binary files a/static/images/docs/project-user-guide/volume-management/volumes/volumebindingmode.jpg and /dev/null differ diff --git a/static/images/docs/quickstart/all-in-one-installation/kubesphere-components.png b/static/images/docs/quickstart/all-in-one-installation/kubesphere-components.png deleted file mode 100644 index 3041ab89a..000000000 Binary files a/static/images/docs/quickstart/all-in-one-installation/kubesphere-components.png and /dev/null differ diff --git a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png b/static/images/docs/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png deleted file mode 100644 index bff9b0fe0..000000000 Binary files a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png and /dev/null differ diff --git a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/detail-page.png b/static/images/docs/quickstart/deploy-bookinfo-to-k8s/detail-page.png deleted file mode 100644 index b01cae082..000000000 Binary files a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/detail-page.png and /dev/null differ diff --git a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/normal-user.png b/static/images/docs/quickstart/deploy-bookinfo-to-k8s/normal-user.png deleted file mode 100644 index 0362a050d..000000000 Binary 
files a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/normal-user.png and /dev/null differ diff --git a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/running.png b/static/images/docs/quickstart/deploy-bookinfo-to-k8s/running.png deleted file mode 100644 index 956e098e4..000000000 Binary files a/static/images/docs/quickstart/deploy-bookinfo-to-k8s/running.png and /dev/null differ diff --git a/static/images/docs/quickstart/enable-pluggable-components/component-status-page.png b/static/images/docs/quickstart/enable-pluggable-components/component-status-page.png deleted file mode 100644 index 2220ffc98..000000000 Binary files a/static/images/docs/quickstart/enable-pluggable-components/component-status-page.png and /dev/null differ diff --git a/static/images/docs/quickstart/enable-pluggable-components/component-status.png b/static/images/docs/quickstart/enable-pluggable-components/component-status.png deleted file mode 100644 index 6de64d622..000000000 Binary files a/static/images/docs/quickstart/enable-pluggable-components/component-status.png and /dev/null differ diff --git a/static/images/docs/quickstart/kubesphere-components.png b/static/images/docs/quickstart/kubesphere-components.png deleted file mode 100644 index 9fbc16df5..000000000 Binary files a/static/images/docs/quickstart/kubesphere-components.png and /dev/null differ diff --git a/static/images/docs/quickstart/minimal-installation-on-k8s/kubesphere-components.png b/static/images/docs/quickstart/minimal-installation-on-k8s/kubesphere-components.png deleted file mode 100644 index e6085b8c9..000000000 Binary files a/static/images/docs/quickstart/minimal-installation-on-k8s/kubesphere-components.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/access-method.png b/static/images/docs/quickstart/wordpress-deployment/access-method.png deleted file mode 100644 index eb6906dfd..000000000 Binary files 
a/static/images/docs/quickstart/wordpress-deployment/access-method.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-container.png b/static/images/docs/quickstart/wordpress-deployment/add-container.png deleted file mode 100644 index bcb313a8b..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-container.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-container1.png b/static/images/docs/quickstart/wordpress-deployment/add-container1.png deleted file mode 100644 index 3e91c3a29..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-container1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-service.png b/static/images/docs/quickstart/wordpress-deployment/add-service.png deleted file mode 100644 index d961d4e04..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-service.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-service1.png b/static/images/docs/quickstart/wordpress-deployment/add-service1.png deleted file mode 100644 index 63136aef6..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-service1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-volume-page.png b/static/images/docs/quickstart/wordpress-deployment/add-volume-page.png deleted file mode 100644 index 90b44516b..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-volume-page.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/add-volume-page1.png b/static/images/docs/quickstart/wordpress-deployment/add-volume-page1.png deleted file mode 100644 index ee41272bb..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/add-volume-page1.png and /dev/null differ diff --git 
a/static/images/docs/quickstart/wordpress-deployment/advanced-settings.png b/static/images/docs/quickstart/wordpress-deployment/advanced-settings.png deleted file mode 100644 index 662df4b33..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/advanced-settings.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/advanced-settings1.png b/static/images/docs/quickstart/wordpress-deployment/advanced-settings1.png deleted file mode 100644 index 5e31decd4..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/advanced-settings1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/advanced.png b/static/images/docs/quickstart/wordpress-deployment/advanced.png deleted file mode 100644 index c98771db3..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/advanced.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/advanced1.png b/static/images/docs/quickstart/wordpress-deployment/advanced1.png deleted file mode 100644 index bc221bf5d..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/advanced1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/application-created.png b/static/images/docs/quickstart/wordpress-deployment/application-created.png deleted file mode 100644 index 315dc9a31..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/application-created.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/application-created1.png b/static/images/docs/quickstart/wordpress-deployment/application-created1.png deleted file mode 100644 index 3ac74cc3e..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/application-created1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/basic-info.png 
b/static/images/docs/quickstart/wordpress-deployment/basic-info.png deleted file mode 100644 index 665fa52b0..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/basic-info.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/basic-info1.png b/static/images/docs/quickstart/wordpress-deployment/basic-info1.png deleted file mode 100644 index fb2e4da5a..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/basic-info1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume.png b/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume.png deleted file mode 100644 index cf3fde411..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume1.png b/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume1.png deleted file mode 100644 index 8b83128d5..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/choose-existing-volume1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/components-finished.png b/static/images/docs/quickstart/wordpress-deployment/components-finished.png deleted file mode 100644 index de36460d0..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/components-finished.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/components-finished1.png b/static/images/docs/quickstart/wordpress-deployment/components-finished1.png deleted file mode 100644 index 66b6f18a5..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/components-finished1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/composing-app.png 
b/static/images/docs/quickstart/wordpress-deployment/composing-app.png deleted file mode 100644 index 95737e725..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/composing-app.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/composing-app1.png b/static/images/docs/quickstart/wordpress-deployment/composing-app1.png deleted file mode 100644 index 2467f3e4b..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/composing-app1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/container-image-page.png b/static/images/docs/quickstart/wordpress-deployment/container-image-page.png deleted file mode 100644 index 7aff5c093..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/container-image-page.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/container-image-page1.png b/static/images/docs/quickstart/wordpress-deployment/container-image-page1.png deleted file mode 100644 index 5094ca0c7..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/container-image-page1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/container-image.png b/static/images/docs/quickstart/wordpress-deployment/container-image.png deleted file mode 100644 index 74e1a658f..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/container-image.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/container-image1.png b/static/images/docs/quickstart/wordpress-deployment/container-image1.png deleted file mode 100644 index 5e0910cde..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/container-image1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/create-secrets.png b/static/images/docs/quickstart/wordpress-deployment/create-secrets.png 
deleted file mode 100644 index 3dc593bb9..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/create-secrets.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/create-secrets1.png b/static/images/docs/quickstart/wordpress-deployment/create-secrets1.png deleted file mode 100644 index 76eb14dbb..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/create-secrets1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/edit-internet-access.png b/static/images/docs/quickstart/wordpress-deployment/edit-internet-access.png deleted file mode 100644 index e7a719b73..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/edit-internet-access1.png b/static/images/docs/quickstart/wordpress-deployment/edit-internet-access1.png deleted file mode 100644 index fcf9a9823..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/edit-internet-access1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/environment-var.png b/static/images/docs/quickstart/wordpress-deployment/environment-var.png deleted file mode 100644 index 0530242a9..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/environment-var.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/environment-var1.png b/static/images/docs/quickstart/wordpress-deployment/environment-var1.png deleted file mode 100644 index d7d06fc35..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/environment-var1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/environment-varss.png b/static/images/docs/quickstart/wordpress-deployment/environment-varss.png deleted file mode 100644 index acd8f13e5..000000000 Binary files 
a/static/images/docs/quickstart/wordpress-deployment/environment-varss.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/environment-varss1.png b/static/images/docs/quickstart/wordpress-deployment/environment-varss1.png deleted file mode 100644 index 621a5d835..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/environment-varss1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/ingress-create.png b/static/images/docs/quickstart/wordpress-deployment/ingress-create.png deleted file mode 100644 index 56ef9ba9f..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/ingress-create.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/ingress-create1.png b/static/images/docs/quickstart/wordpress-deployment/ingress-create1.png deleted file mode 100644 index 5b7c7f320..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/ingress-create1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/key-value.png b/static/images/docs/quickstart/wordpress-deployment/key-value.png deleted file mode 100644 index 1931ef75b..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/key-value.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/key-value1.png b/static/images/docs/quickstart/wordpress-deployment/key-value1.png deleted file mode 100644 index f09c5a3fc..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/key-value1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mount-volume-page.png b/static/images/docs/quickstart/wordpress-deployment/mount-volume-page.png deleted file mode 100644 index 8cfa3e277..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mount-volume-page.png and /dev/null differ diff --git 
a/static/images/docs/quickstart/wordpress-deployment/mount-volume-page1.png b/static/images/docs/quickstart/wordpress-deployment/mount-volume-page1.png deleted file mode 100644 index a88339337..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mount-volume-page1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysql-finished.png b/static/images/docs/quickstart/wordpress-deployment/mysql-finished.png deleted file mode 100644 index ff817efb4..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysql-finished.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysql-finished1.png b/static/images/docs/quickstart/wordpress-deployment/mysql-finished1.png deleted file mode 100644 index c7039e500..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysql-finished1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysql-running.png b/static/images/docs/quickstart/wordpress-deployment/mysql-running.png deleted file mode 100644 index d49e52b83..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysql-running.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysql-running1.png b/static/images/docs/quickstart/wordpress-deployment/mysql-running1.png deleted file mode 100644 index 460d594d4..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysql-running1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysqlname.png b/static/images/docs/quickstart/wordpress-deployment/mysqlname.png deleted file mode 100644 index 495966c81..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysqlname.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/mysqlname1.png 
b/static/images/docs/quickstart/wordpress-deployment/mysqlname1.png deleted file mode 100644 index 65c70e4a1..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/mysqlname1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/name-wordpress-1.png b/static/images/docs/quickstart/wordpress-deployment/name-wordpress-1.png deleted file mode 100644 index c26639a48..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/name-wordpress-1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/name-wordpress.png b/static/images/docs/quickstart/wordpress-deployment/name-wordpress.png deleted file mode 100644 index d4f92f5af..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/name-wordpress.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/nodeport-number.png b/static/images/docs/quickstart/wordpress-deployment/nodeport-number.png deleted file mode 100644 index 7c6e41be4..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/nodeport-number.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/nodeport-number1.png b/static/images/docs/quickstart/wordpress-deployment/nodeport-number1.png deleted file mode 100644 index 73f007c85..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/nodeport-number1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volume-settings.png b/static/images/docs/quickstart/wordpress-deployment/volume-settings.png deleted file mode 100644 index e8265ffec..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volume-settings.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volume-settings1.png b/static/images/docs/quickstart/wordpress-deployment/volume-settings1.png deleted file mode 100644 index 
2962b9653..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volume-settings1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volume-template.jpg b/static/images/docs/quickstart/wordpress-deployment/volume-template.jpg deleted file mode 100644 index 3adfe01b0..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volume-template.jpg and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volume-template1.png b/static/images/docs/quickstart/wordpress-deployment/volume-template1.png deleted file mode 100644 index e3ef2722f..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volume-template1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volumes.png b/static/images/docs/quickstart/wordpress-deployment/volumes.png deleted file mode 100644 index 162db22d0..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volumes.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/volumes1.png b/static/images/docs/quickstart/wordpress-deployment/volumes1.png deleted file mode 100644 index 5b7f71868..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/volumes1.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment.png b/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment.png deleted file mode 100644 index 036f58fb9..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment1.png b/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment1.png deleted file mode 100644 index d021439ae..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/wordpress-deployment1.png and 
/dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets.png b/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets.png deleted file mode 100644 index 95b56e9b0..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets.png and /dev/null differ diff --git a/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets1.png b/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets1.png deleted file mode 100644 index af48cbc6a..000000000 Binary files a/static/images/docs/quickstart/wordpress-deployment/wordpress-secrets1.png and /dev/null differ diff --git a/static/images/docs/toolbox/alerting-archiving-rule.jpg b/static/images/docs/toolbox/alerting-archiving-rule.jpg deleted file mode 100644 index 9cfb83d34..000000000 Binary files a/static/images/docs/toolbox/alerting-archiving-rule.jpg and /dev/null differ diff --git a/static/images/docs/toolbox/auditing-crd.jpg b/static/images/docs/toolbox/auditing-crd.jpg deleted file mode 100644 index d80c77dd2..000000000 Binary files a/static/images/docs/toolbox/auditing-crd.jpg and /dev/null differ diff --git a/static/images/docs/toolbox/auditing-log-filter.png b/static/images/docs/toolbox/auditing-log-filter.png deleted file mode 100644 index d3303c60d..000000000 Binary files a/static/images/docs/toolbox/auditing-log-filter.png and /dev/null differ diff --git a/static/images/docs/toolbox/auditing-operating.png b/static/images/docs/toolbox/auditing-operating.png deleted file mode 100644 index c1b691284..000000000 Binary files a/static/images/docs/toolbox/auditing-operating.png and /dev/null differ diff --git a/static/images/docs/toolbox/auditing-query/auditing-log-details.png b/static/images/docs/toolbox/auditing-query/auditing-log-details.png deleted file mode 100644 index 4e81f3c0a..000000000 Binary files a/static/images/docs/toolbox/auditing-query/auditing-log-details.png and /dev/null differ diff --git 
a/static/images/docs/toolbox/auditing-query/auditing-logs.png b/static/images/docs/toolbox/auditing-query/auditing-logs.png deleted file mode 100644 index f3624f88b..000000000 Binary files a/static/images/docs/toolbox/auditing-query/auditing-logs.png and /dev/null differ diff --git a/static/images/docs/toolbox/auditing-query/services-created.png b/static/images/docs/toolbox/auditing-query/services-created.png deleted file mode 100644 index 481e92369..000000000 Binary files a/static/images/docs/toolbox/auditing-query/services-created.png and /dev/null differ diff --git a/static/images/docs/toolbox/event-query/event-details.png b/static/images/docs/toolbox/event-query/event-details.png deleted file mode 100644 index f177dabd2..000000000 Binary files a/static/images/docs/toolbox/event-query/event-details.png and /dev/null differ diff --git a/static/images/docs/toolbox/event-query/event-search-list.png b/static/images/docs/toolbox/event-query/event-search-list.png deleted file mode 100644 index 3b52fa032..000000000 Binary files a/static/images/docs/toolbox/event-query/event-search-list.png and /dev/null differ diff --git a/static/images/docs/toolbox/event-query/event-search.png b/static/images/docs/toolbox/event-query/event-search.png deleted file mode 100644 index 00bc7f8ff..000000000 Binary files a/static/images/docs/toolbox/event-query/event-search.png and /dev/null differ diff --git a/static/images/docs/toolbox/index/toolbox.png b/static/images/docs/toolbox/index/toolbox.png deleted file mode 100644 index 7a03a7ff4..000000000 Binary files a/static/images/docs/toolbox/index/toolbox.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/enable-billing/metering-and-billing.png b/static/images/docs/toolbox/metering-and-billing/enable-billing/metering-and-billing.png deleted file mode 100644 index 90f8d67ce..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/enable-billing/metering-and-billing.png and /dev/null differ 
diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png deleted file mode 100644 index 39951532d..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/node-page.png b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/node-page.png deleted file mode 100644 index 44a0a35a3..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/node-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/pod-page.png b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/pod-page.png deleted file mode 100644 index 147f7ef3f..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/pod-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/project-page.png b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/project-page.png deleted file mode 100644 index f30706ed5..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/project-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workload-page.png b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workload-page.png deleted file mode 100644 index dd96ce1fa..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workload-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png 
b/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png deleted file mode 100644 index 6622d7f52..000000000 Binary files a/static/images/docs/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png and /dev/null differ diff --git a/static/images/docs/toolbox/user-changed.png b/static/images/docs/toolbox/user-changed.png deleted file mode 100644 index 8be3eb554..000000000 Binary files a/static/images/docs/toolbox/user-changed.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-info-dialogue.png b/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-info-dialogue.png deleted file mode 100644 index 742a26161..000000000 Binary files a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-info-dialogue.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo-list.png b/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo-list.png deleted file mode 100644 index c475b898e..000000000 Binary files a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo-list.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo.png b/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo.png deleted file mode 100644 index 01d1ac07b..000000000 Binary files a/static/images/docs/workspace-administration/app-repository/import-helm-repository/app-repo.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/app-repository/import-helm-repository/validate-link.png b/static/images/docs/workspace-administration/app-repository/import-helm-repository/validate-link.png deleted file mode 100644 index 085a0a196..000000000 Binary files 
a/static/images/docs/workspace-administration/app-repository/import-helm-repository/validate-link.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/project-quotas/project-quotas.png b/static/images/docs/workspace-administration/project-quotas/project-quotas.png deleted file mode 100644 index 9a193d86e..000000000 Binary files a/static/images/docs/workspace-administration/project-quotas/project-quotas.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/project-quotas/set-other-resouce-quotas.png b/static/images/docs/workspace-administration/project-quotas/set-other-resouce-quotas.png deleted file mode 100644 index 363c67535..000000000 Binary files a/static/images/docs/workspace-administration/project-quotas/set-other-resouce-quotas.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/project-quotas/set-project-quotas.png b/static/images/docs/workspace-administration/project-quotas/set-project-quotas.png deleted file mode 100644 index 664b0f12a..000000000 Binary files a/static/images/docs/workspace-administration/project-quotas/set-project-quotas.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/role-and-member-management/edit-existing-user.png b/static/images/docs/workspace-administration/role-and-member-management/edit-existing-user.png deleted file mode 100644 index 9feb6b14d..000000000 Binary files a/static/images/docs/workspace-administration/role-and-member-management/edit-existing-user.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/role-and-member-management/role-list.png b/static/images/docs/workspace-administration/role-and-member-management/role-list.png deleted file mode 100644 index 764101994..000000000 Binary files a/static/images/docs/workspace-administration/role-and-member-management/role-list.png and /dev/null differ diff --git 
a/static/images/docs/workspace-administration/role-and-member-management/role-permissions.png b/static/images/docs/workspace-administration/role-and-member-management/role-permissions.png deleted file mode 100644 index ac24a7ac0..000000000 Binary files a/static/images/docs/workspace-administration/role-and-member-management/role-permissions.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/confirm-upload.png b/static/images/docs/workspace-administration/upload-helm-based-application/confirm-upload.png deleted file mode 100644 index 0b401f590..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/confirm-upload.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/draft-app.png b/static/images/docs/workspace-administration/upload-helm-based-application/draft-app.png deleted file mode 100644 index 2da6e6fb4..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/draft-app.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/upload-app-template.png b/static/images/docs/workspace-administration/upload-helm-based-application/upload-app-template.png deleted file mode 100644 index 9414ff0ff..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/upload-app-template.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/upload-helm.png b/static/images/docs/workspace-administration/upload-helm-based-application/upload-helm.png deleted file mode 100644 index c26994e56..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/upload-helm.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/upload-icon.png 
b/static/images/docs/workspace-administration/upload-helm-based-application/upload-icon.png deleted file mode 100644 index adda82265..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/upload-icon.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/upload-helm-based-application/version-page.png b/static/images/docs/workspace-administration/upload-helm-based-application/version-page.png deleted file mode 100644 index 3970759ec..000000000 Binary files a/static/images/docs/workspace-administration/upload-helm-based-application/version-page.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-network-isolation/workspace-isolation.png b/static/images/docs/workspace-administration/workspace-network-isolation/workspace-isolation.png deleted file mode 100644 index 14bb53eb9..000000000 Binary files a/static/images/docs/workspace-administration/workspace-network-isolation/workspace-isolation.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-overview/set-workspace-info.png b/static/images/docs/workspace-administration/workspace-overview/set-workspace-info.png deleted file mode 100644 index f346f6279..000000000 Binary files a/static/images/docs/workspace-administration/workspace-overview/set-workspace-info.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-overview/workspace-basic-information.png b/static/images/docs/workspace-administration/workspace-overview/workspace-basic-information.png deleted file mode 100644 index a1388d4dd..000000000 Binary files a/static/images/docs/workspace-administration/workspace-overview/workspace-basic-information.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-overview/workspace-created.png b/static/images/docs/workspace-administration/workspace-overview/workspace-created.png deleted file mode 100644 
index 8aa0efae0..000000000 Binary files a/static/images/docs/workspace-administration/workspace-overview/workspace-created.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-overview/workspace-overview.png b/static/images/docs/workspace-administration/workspace-overview/workspace-overview.png deleted file mode 100644 index a82917d25..000000000 Binary files a/static/images/docs/workspace-administration/workspace-overview/workspace-overview.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-overview/workspaces-list.png b/static/images/docs/workspace-administration/workspace-overview/workspaces-list.png deleted file mode 100644 index ad3239472..000000000 Binary files a/static/images/docs/workspace-administration/workspace-overview/workspaces-list.png and /dev/null differ diff --git a/static/images/docs/workspace-administration/workspace-quotas/edit-workspace-quotas.png b/static/images/docs/workspace-administration/workspace-quotas/edit-workspace-quotas.png deleted file mode 100644 index 60d72c76a..000000000 Binary files a/static/images/docs/workspace-administration/workspace-quotas/edit-workspace-quotas.png and /dev/null differ diff --git a/static/images/docs/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png b/static/images/docs/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png deleted file mode 100644 index 059e9cc69..000000000 Binary files a/static/images/docs/zh-cn/access-control-and-account-management/multi-tanancy-in-kubesphere/rbac.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/account-ready-6.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/account-ready-6.PNG deleted file mode 100644 index f995053d1..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/account-ready-6.PNG and /dev/null differ diff 
--git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-app-41.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-app-41.PNG deleted file mode 100644 index 266fad513..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-app-41.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-version-43.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-version-43.PNG deleted file mode 100644 index 058331e23..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/activate-version-43.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-active-22.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-active-22.PNG deleted file mode 100644 index 798fc5a36..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-active-22.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-category-25.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-category-25.PNG deleted file mode 100644 index b2d717283..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-category-25.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-draft-10.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-draft-10.PNG deleted file mode 100644 index 60608fa05..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-draft-10.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-in-category-list-expected-29.PNG 
b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-in-category-list-expected-29.PNG deleted file mode 100644 index 90ee6a477..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-in-category-list-expected-29.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-review-name-3.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-review-name-3.PNG deleted file mode 100644 index 709c103b7..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-review-name-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-store.png b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-store.png deleted file mode 100644 index 7892c684a..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-templates-page-21.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-templates-page-21.PNG deleted file mode 100644 index 8eb0b1e27..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-templates-page-21.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-reviewed-19.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-reviewed-19.PNG deleted file mode 100644 index a7e437ab1..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-reviewed-19.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-upgraded-34.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-upgraded-34.PNG deleted file mode 
100644 index 13b7f7649..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/app-to-be-upgraded-34.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/confirm-category-28.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/confirm-category-28.PNG deleted file mode 100644 index 0b060a146..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/confirm-category-28.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-review-role-5.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-review-role-5.PNG deleted file mode 100644 index 3121c6e2c..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-review-role-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-role-2.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-role-2.PNG deleted file mode 100644 index e45bb164c..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-role-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-roles-4.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-roles-4.PNG deleted file mode 100644 index af2e9cfbf..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/create-roles-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploy-redis-24.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploy-redis-24.PNG deleted file mode 100644 index 4d8189dad..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploy-redis-24.PNG and /dev/null 
differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployed-instance-success-16.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployed-instance-success-16.PNG deleted file mode 100644 index 4460b8cf7..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployed-instance-success-16.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploying-app-15.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploying-app-15.PNG deleted file mode 100644 index 77a2d3289..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deploying-app-15.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployment-place-14.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployment-place-14.PNG deleted file mode 100644 index b5d8bd78e..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/deployment-place-14.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-information-12.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-information-12.PNG deleted file mode 100644 index b14a41e8f..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-information-12.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-template-11.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-template-11.PNG deleted file mode 100644 index cec90354b..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-app-template-11.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-template-35.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-template-35.PNG deleted file mode 100644 index 3d03deb6f..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/edit-template-35.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/new-version-redis-30.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/new-version-redis-30.PNG deleted file mode 100644 index ec2a7b7df..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/new-version-redis-30.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/redis-23.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/redis-23.PNG deleted file mode 100644 index b7a304b89..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/redis-23.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/remove-app-39.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/remove-app-39.PNG deleted file mode 100644 index d39d5b2d5..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/remove-app-39.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/reviewing-20.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/reviewing-20.PNG deleted file mode 100644 index 8d91c3a48..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/reviewing-20.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/see-new-version-33.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/see-new-version-33.PNG deleted file 
mode 100644 index 40e0e4a00..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/see-new-version-33.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-app-type-26.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-app-type-26.PNG deleted file mode 100644 index 49b38a2c3..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-app-type-26.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-category-for-app-27.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-category-for-app-27.PNG deleted file mode 100644 index bfd4aec95..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/set-category-for-app-27.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/submit-for-review-17.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/submit-for-review-17.PNG deleted file mode 100644 index 8aab3f79e..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/submit-for-review-17.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/submitted-app-18.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/submitted-app-18.PNG deleted file mode 100644 index eb42d3536..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/submitted-app-18.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-app-40.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-app-40.PNG deleted file mode 100644 index a2fa4df73..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-app-40.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-version-42.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-version-42.PNG deleted file mode 100644 index 59dc79a58..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/suspend-version-42.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/test-deployment-13.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/test-deployment-13.PNG deleted file mode 100644 index fc7dbb9ad..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/test-deployment-13.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-an-app-36.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-an-app-36.PNG deleted file mode 100644 index 87236752f..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-an-app-36.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-finish-38.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-finish-38.PNG deleted file mode 100644 index 3f4f1b91b..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upgrade-finish-38.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-app-7.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-app-7.PNG deleted file mode 100644 index 8671a1f8a..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-app-7.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-icon-9.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-icon-9.PNG deleted file mode 100644 index a7ede7e54..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-icon-9.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-new-redis-version-31.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-new-redis-version-31.PNG deleted file mode 100644 index f415d2a51..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-new-redis-version-31.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-template-8.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-template-8.PNG deleted file mode 100644 index f12feb2bc..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/upload-template-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/uploaded-new-version-32.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/uploaded-new-version-32.PNG deleted file mode 100644 index 03efac5e5..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/uploaded-new-version-32.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/application-lifecycle-management/version-upgraded-37.PNG b/static/images/docs/zh-cn/appstore/application-lifecycle-management/version-upgraded-37.PNG deleted file mode 100644 index b118e0ea0..000000000 Binary files a/static/images/docs/zh-cn/appstore/application-lifecycle-management/version-upgraded-37.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png new file mode 100644 index 000000000..566869282 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-dashboard-networkchaos.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png new file mode 100644 index 000000000..d9ea4c933 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-experiment-scope.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png new file mode 100644 index 000000000..149b52cc5 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-app.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png new file mode 100644 index 000000000..02d469e7e Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-basic-info.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png new file mode 100644 index 000000000..7265ffc1e Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-config.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png new file mode 100644 index 000000000..f3bf39e16 
Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployed.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployments.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployments.png new file mode 100644 index 000000000..51ba2b22f Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-deployments.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png new file mode 100644 index 000000000..91a43cc18 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/chaos-mesh-nodeport.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png new file mode 100644 index 000000000..2c3cfad42 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-result.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-status.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-status.png new file mode 100644 index 000000000..5c12e48c4 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/experiment-status.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png new file mode 100644 index 000000000..c85b4598a Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/install-chaos-mesh.png differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png new file mode 100644 index 000000000..7af4952ed Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/login-to-dashboard.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png new file mode 100644 index 000000000..4f7969400 Binary files /dev/null and b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-chaos-mesh/web-show-app.png differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deploy-etcd-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deploy-etcd-3.PNG deleted file mode 100644 index 8a978e60c..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deploy-etcd-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deployment-location-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deployment-location-4.PNG deleted file mode 100644 index 540e783b1..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/deployment-location-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-app-store-2.PNG deleted file mode 100644 index aadbd2dbf..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-running-6.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-running-6.PNG deleted file 
mode 100644 index e813413ba..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-running-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-statefulset-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-statefulset-7.PNG deleted file mode 100644 index b1f4a6b47..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-statefulset-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-terminal-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-terminal-8.PNG deleted file mode 100644 index 4eb8c43fe..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/etcd-terminal-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/project-overview-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/project-overview-1.PNG deleted file mode 100644 index 56e6e0a24..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/project-overview-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/specify-volume-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/specify-volume-5.PNG deleted file mode 100644 index 7b31f0152..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-etcd-on-ks/specify-volume-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/app-store-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/app-store-1.PNG deleted file mode 100644 index 35dfa876e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/app-store-1.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/click-deploy-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/click-deploy-3.PNG deleted file mode 100644 index f4b13faff..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/click-deploy-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/creating-harbor-6.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/creating-harbor-6.PNG deleted file mode 100644 index 64cf34596..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/creating-harbor-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/deploy-harbor-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/deploy-harbor-4.PNG deleted file mode 100644 index 816490f5e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/deploy-harbor-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/find-harbor-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/find-harbor-2.PNG deleted file mode 100644 index a16693b4a..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-harbor-on-ks/find-harbor-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deploying-memcached-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deploying-memcached-3.PNG deleted file mode 100644 index 8dbb407ec..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deploying-memcached-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deployment-confirm-4.PNG 
b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deployment-confirm-4.PNG deleted file mode 100644 index 5a93c1772..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/deployment-confirm-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/edit-config-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/edit-config-5.PNG deleted file mode 100644 index 6eaf9806d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/edit-config-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/in-app-store-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/in-app-store-1.PNG deleted file mode 100644 index 11641c275..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/in-app-store-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-app-store-2.PNG deleted file mode 100644 index 2ea93dfe9..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-port-pod-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-port-pod-8.PNG deleted file mode 100644 index c5e9a46d2..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-port-pod-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-running-6.PNG 
b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-running-6.PNG deleted file mode 100644 index 599d3d223..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-running-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-service-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-service-7.PNG deleted file mode 100644 index 18b47d1b5..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-memcached-on-ks/memcached-service-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/config-file-12.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/config-file-12.PNG deleted file mode 100644 index 0f256a9af..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/config-file-12.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-3.PNG deleted file mode 100644 index 7af0285d4..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-5.PNG deleted file mode 100644 index 942bdc9ca..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/deploy-minio-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/edit-internet-access-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/edit-internet-access-8.PNG deleted file mode 100644 index 
0256b8595..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/edit-internet-access-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-app-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-app-1.PNG deleted file mode 100644 index 1aba246b7..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-app-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-deploy-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-deploy-4.PNG deleted file mode 100644 index 9371adf8f..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-deploy-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-detail-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-detail-7.PNG deleted file mode 100644 index 8849e931d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-detail-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-app-store-2.PNG deleted file mode 100644 index 23272e7c1..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-list-6.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-list-6.PNG deleted file mode 100644 index be105b912..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/minio-in-list-6.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/nodeport-9.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/nodeport-9.PNG deleted file mode 100644 index 4158c4528..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/nodeport-9.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/port-exposed-10.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/port-exposed-10.PNG deleted file mode 100644 index 5a7816e3d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/port-exposed-10.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/template-list-11.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/template-list-11.PNG deleted file mode 100644 index f47b32bde..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/deploy-minio-on-ks/template-list-11.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/app-store-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/app-store-1.PNG deleted file mode 100644 index 9ad77501d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/app-store-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/confirm-deployment-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/confirm-deployment-4.PNG deleted file mode 100644 index 6075b6b14..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/confirm-deployment-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/deploy-mongodb-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/deploy-mongodb-3.PNG deleted file mode 100644 index 1c4f9cad1..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/deploy-mongodb-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-in-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-in-app-store-2.PNG deleted file mode 100644 index 8569ac30e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-in-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-running-6.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-running-6.PNG deleted file mode 100644 index 5ee726026..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-running-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-7.PNG deleted file mode 100644 index 4e48ded0d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-service-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-terminal-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-terminal-8.PNG deleted file mode 100644 index 75845a2f1..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/mongodb-terminal-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/set-app-configuration-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/set-app-configuration-5.PNG deleted file mode 100644 index a4b09ccee..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mongodb-app/set-app-configuration-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/click-deploy.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/click-deploy.png deleted file mode 100644 index d60714fab..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/click-deploy.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/deploy-mysql.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/deploy-mysql.png deleted file mode 100644 index 88e08a885..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/deploy-mysql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/edit-internet-access.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/edit-internet-access.png deleted file mode 100644 index ada1ac83f..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/find-mysql.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/find-mysql.png deleted file mode 100644 index a3e2fdb10..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/find-mysql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/go-to-app-store.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/go-to-app-store.png deleted file mode 100644 index d145e0a20..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/go-to-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-port-number.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-port-number.png deleted file mode 100644 index da0da1105..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-port-number.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-running.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-running.png deleted file mode 100644 index 9d5eafde7..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-service.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-service.png deleted file mode 100644 index 4515c251f..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-terminal.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-terminal.png deleted file mode 100644 index 639e97c56..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-terminal.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-workload.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-workload.png deleted file mode 100644 index 348c93dfb..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/mysql-workload.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/nodeport-mysql.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/nodeport-mysql.png deleted file mode 100644 index ae72997b9..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/nodeport-mysql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/uncomment-password.png b/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/uncomment-password.png deleted file mode 100644 index 3eb4de7f0..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/mysql-app/uncomment-password.png and /dev/null differ 
diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/app-store-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/app-store-1.PNG deleted file mode 100644 index 56e6e0a24..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/app-store-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/confirm-deployment-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/confirm-deployment-4.PNG deleted file mode 100644 index ff78dce30..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/confirm-deployment-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/deploy-nginx-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/deploy-nginx-3.PNG deleted file mode 100644 index bc36bea37..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/deploy-nginx-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-config-nginx-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-config-nginx-5.PNG deleted file mode 100644 index 4ef1322ce..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-config-nginx-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-internet-access-9.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-internet-access-9.PNG deleted file mode 100644 index 0b25f2c9b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/edit-internet-access-9.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/exposed-port-11.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/exposed-port-11.PNG deleted file mode 100644 index 5e741fc0e..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/exposed-port-11.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-in-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-in-app-store-2.PNG deleted file mode 100644 index 82f7d2c0e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-in-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-running-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-running-7.PNG deleted file mode 100644 index 16d1638d6..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-running-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-service-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-service-8.PNG deleted file mode 100644 index ff730e76f..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nginx-service-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nodeport-10.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nodeport-10.PNG deleted file mode 100644 index c8b792c26..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/nginx-app/nodeport-10.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/access-postgresql.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/access-postgresql.png deleted file mode 100644 index 38464dfeb..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/access-postgresql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/click-app-store.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/click-app-store.png deleted 
file mode 100644 index 200598e45..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/click-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/container-terminal.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/container-terminal.png deleted file mode 100644 index d5889be80..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/container-terminal.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png deleted file mode 100644 index b33ffd5f9..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql.png deleted file mode 100644 index f4d4104ef..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/deploy-postgresql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/edit-internet-access.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/edit-internet-access.png deleted file mode 100644 index 1a9eea175..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/nodeport.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/nodeport.png deleted file mode 100644 index d82f0881b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/nodeport.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/port-number.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/port-number.png deleted file mode 100644 index 6f43a10eb..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/port-number.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png deleted file mode 100644 index e9cf67b9b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-ready.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-ready.png deleted file mode 100644 index 690cc85b3..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/postgresql-ready.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/set-config.png b/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/set-config.png deleted file mode 100644 index b810eca17..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/postgresql-app/set-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png deleted file mode 100644 index 40c2e4c24..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png deleted file mode 100644 index 4aa38edac..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png deleted file mode 100644 index 200598e45..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq01.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png deleted file mode 100644 index e1e2f797c..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq02.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png deleted file mode 100644 index 6d121b928..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq021.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png deleted file mode 100644 index 759495fad..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq03.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png deleted file mode 100644 index 209821a32..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq05.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png deleted file mode 100644 index dc9720d30..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq06.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png deleted file mode 100644 index ea3cdcff8..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq07.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png deleted file mode 100644 index d82f0881b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq08.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png b/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png deleted file mode 100644 index b57525dac..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/rabbitmq-app/rabbitmq09.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png deleted file mode 100644 index 9e04548cf..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png deleted file mode 100644 index 804b25f13..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/deploy-radondb-mysql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png deleted file mode 100644 index 42c32880e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png deleted file mode 100644 index 5749a5fab..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png deleted file mode 100644 index a7eee75fe..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png deleted file mode 100644 index c71378cb2..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/radondb-mysql-terminal.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png deleted file mode 100644 index 9dea596dd..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-mysql-app/set-app-configuration.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png deleted file mode 100644 index e7e5542ff..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png deleted file mode 100644 index b5446a61d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/deploy-radondb-postgresql.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/pods-running.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/pods-running.png deleted file mode 100644 index 69d15b5b3..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/pods-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/project-overview.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/project-overview.png deleted file mode 100644 index 99facd49b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/project-overview.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png deleted file mode 100644 index 43078e908..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-in-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png deleted file mode 100644 index d6fa9e56d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png deleted file mode 100644 index 285be1c55..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png deleted file mode 100644 index 0c054fa02..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/radondb-postgresql-terminal.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png deleted file mode 100644 index 91324dae5..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/set-app-configuration.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png deleted file mode 100644 index 38148a557..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulset-monitoring.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png deleted file mode 100644 index bb19b6ceb..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volume-status.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volume-status.png deleted file mode 100644 index 9ba509282..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volume-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volumes.png b/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volumes.png deleted file mode 100644 index 627e20189..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/radondb-postgresql-app/volumes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/access-redis-7.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/access-redis-7.PNG deleted file mode 100644 index 22edc9dd2..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/access-redis-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/app-store-1.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/app-store-1.PNG deleted file mode 100644 index 56e6e0a24..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/app-store-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/config-redis-5.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/config-redis-5.PNG deleted file mode 100644 index aa583f8e6..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/config-redis-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/confirm-deployment-4.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/confirm-deployment-4.PNG deleted file mode 100644 index e114d83a5..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/confirm-deployment-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/deploy-redis-3.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/deploy-redis-3.PNG deleted file mode 100644 index ac3c498f9..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/deploy-redis-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-in-app-store-2.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-in-app-store-2.PNG deleted file mode 100644 index 6854decff..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-in-app-store-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-running-6.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-running-6.PNG deleted file mode 100644 index 999b36cc0..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-running-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-terminal-8.PNG b/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-terminal-8.PNG deleted file mode 100644 index 9462be89b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/redis-app/redis-terminal-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-deploy.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-deploy.png deleted file mode 
100644 index fc9e369e2..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-deploy.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-next.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-next.png deleted file mode 100644 index fc78ad900..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-next.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-tomcat-service.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-tomcat-service.png deleted file mode 100644 index 0acab96cd..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/click-tomcat-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/deploy-tomcat.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/deploy-tomcat.png deleted file mode 100644 index a29a5313f..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/deploy-tomcat.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/edit-internet-access.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/edit-internet-access.png deleted file mode 100644 index 56c428ca8..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/exposed-port.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/exposed-port.png deleted file mode 100644 index 03301ac00..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/exposed-port.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/find-tomcat.png 
b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/find-tomcat.png deleted file mode 100644 index 2e018526e..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/find-tomcat.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/nodeport.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/nodeport.png deleted file mode 100644 index d82f0881b..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-app01.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-app01.png deleted file mode 100644 index 200598e45..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-app01.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-running.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-running.png deleted file mode 100644 index 5df7c474d..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-terminal-icon.png b/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-terminal-icon.png deleted file mode 100644 index e25376600..000000000 Binary files a/static/images/docs/zh-cn/appstore/built-in-apps/tomcat-app/tomcat-terminal-icon.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-clickhouse.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-clickhouse.png deleted file mode 100644 index 87967e996..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-clickhouse.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-repo.png deleted file mode 100644 index d81a137e7..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/add-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/app-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/app-running.png deleted file mode 100644 index 5fb826a03..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/app-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/basic-info.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/basic-info.png deleted file mode 100644 index 9c5b3b277..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/chart-tab.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/chart-tab.png deleted file mode 100644 index a146417b6..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/chart-tab.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png deleted file mode 100644 index af043c1a8..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy-new-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy.png deleted file mode 100644 index 505af6d73..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/click-deploy.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png deleted file mode 100644 index 1d407683e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/clickhouse-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/from-app-templates.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/from-app-templates.png deleted file mode 100644 index d37a8b47f..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/from-app-templates.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/pods-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/pods-running.png deleted file mode 100644 index 07a7e5e91..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/pods-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/project-overview.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/project-overview.png deleted file mode 100644 index 367bb7c96..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/project-overview.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/repo-added.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/repo-added.png deleted file mode 100644 index eb291ee19..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/repo-added.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png deleted file mode 100644 index 7a3c385dd..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulset-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulsets-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulsets-running.png deleted file mode 100644 index e632d3cb8..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volume-status.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volume-status.png deleted file mode 100644 index 181bf443e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volume-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volumes.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volumes.png deleted file mode 100644 index ea8f4e78a..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-clickhouse/volumes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-main-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-main-repo.png deleted file mode 100644 index 552a4ed89..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-main-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-repo.png deleted file mode 100644 index a6ad5b570..000000000 
Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/add-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/added-main-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/added-main-repo.png deleted file mode 100644 index f911f65ba..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/added-main-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/basic_info.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/basic_info.png deleted file mode 100644 index c6d23bc8e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/basic_info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/change_value.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/change_value.png deleted file mode 100644 index 0b6cd86e2..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/change_value.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/click_gitlab.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/click_gitlab.png deleted file mode 100644 index ca1914752..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/click_gitlab.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deploy-app.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deploy-app.png deleted file mode 100644 index 7e0754ea0..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deploy-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deployments-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deployments-running.png 
deleted file mode 100644 index 4e3611158..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/deployments-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/from-app-templates.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/from-app-templates.png deleted file mode 100644 index 328f8ef44..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/from-app-templates.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/gitlab-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/gitlab-running.png deleted file mode 100644 index 9e5c77de6..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/gitlab-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/initial-password.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/initial-password.png deleted file mode 100644 index 3874123f8..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/initial-password.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-secret.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-secret.png deleted file mode 100644 index 074a9dbef..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-service.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-service.png deleted file mode 100644 index a735c1dd7..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/search-service.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/statefulsets-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/statefulsets-running.png deleted file mode 100644 index f3fcb323c..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/view_config.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/view_config.png deleted file mode 100644 index a521a3671..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-gitlab/view_config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-metersphere-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-metersphere-repo.png deleted file mode 100644 index b32d5a5bb..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-metersphere-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-repo.png deleted file mode 100644 index f962e03fb..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/add-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/added-metersphere-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/added-metersphere-repo.png deleted file mode 100644 index 1ebc05d44..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/added-metersphere-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/basic-info.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/basic-info.png deleted file mode 
100644 index 88c2396df..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/change-value.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/change-value.png deleted file mode 100644 index 22c392fa5..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/change-value.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/click-metersphere.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/click-metersphere.png deleted file mode 100644 index 65150906e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/click-metersphere.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deploy-app.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deploy-app.png deleted file mode 100644 index 79721ff93..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deploy-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deployments-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deployments-running.png deleted file mode 100644 index 554188d51..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/deployments-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/from-app-templates.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/from-app-templates.png deleted file mode 100644 index 6e65a09b7..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/from-app-templates.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-running.png deleted file mode 100644 index 61afdaa7f..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-service.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-service.png deleted file mode 100644 index 00594cdb7..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/metersphere-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/statefulsets-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/statefulsets-running.png deleted file mode 100644 index e88893d13..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/statefulsets-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/view-config.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/view-config.png deleted file mode 100644 index ecfd44800..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-metersphere/view-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.png deleted file mode 100644 index 6586f4b2c..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.png 
b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.png deleted file mode 100644 index 010157035..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.png deleted file mode 100644 index 9497c5e5d..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.png deleted file mode 100644 index cac1c7c41..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.png deleted file mode 100644 index b1ee6cf0f..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.png deleted file mode 100644 index 090dc1cd2..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.png deleted file mode 100644 index d91e74e15..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.png deleted file mode 100644 index 9a8d68d8e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.png deleted file mode 100644 index e8a05262f..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.png deleted file mode 100644 index d070dc071..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.png deleted file mode 100644 index e5929594d..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.png deleted file mode 100644 index aac8aa1ff..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.png deleted file mode 100644 index c98562e2b..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.png deleted file mode 100644 index 8348845e1..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.png deleted file mode 100644 index 029bd55f6..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.png 
deleted file mode 100644 index 6dea11894..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.png deleted file mode 100644 index 24c7c8c08..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.png deleted file mode 100644 index 1660e02b8..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.png deleted file mode 100644 index 74eb39b5e..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.png deleted file mode 100644 index 24eb29d3d..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.png deleted file mode 100644 index 571c0d919..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.png deleted file mode 100644 index 54ba12259..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.png deleted file mode 100644 index 34f25eb29..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.png deleted file mode 100644 index 0ff25cb1d..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.png deleted file mode 100644 index f64eaa5ee..000000000 Binary files 
a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.png deleted file mode 100644 index 50b63bc08..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.png deleted file mode 100644 index 955947643..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.png and /dev/null differ diff --git a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.png b/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.png deleted file mode 100644 index dccac2c96..000000000 Binary files a/static/images/docs/zh-cn/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/application-resources-monitoring1.png b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/application-resources-monitoring1.png deleted file mode 100644 index 3262a239a..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/application-resources-monitoring1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png 
b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png deleted file mode 100644 index 3ddb1618b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/cluster-resources-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/clusters-management.png b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/clusters-management.png deleted file mode 100644 index b07db0ae8..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/clusters-management.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/platform.png b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/platform.png deleted file mode 100644 index 24dc70e1b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/platform.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/time-range.png b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/time-range.png deleted file mode 100644 index 88760ad2c..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/time-range.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/usage-ranking.png b/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/usage-ranking.png deleted file mode 100644 index 3fd0031dc..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/application-resources-monitoring/usage-ranking.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.PNG b/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.PNG deleted file mode 100644 index b07d69c10..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/assign-workspace.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png b/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png deleted file mode 100644 index ac1600330..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-project.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.PNG b/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.PNG deleted file mode 100644 index 4c85338ca..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/create-workspace.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-a-cluster.PNG b/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-a-cluster.PNG deleted file mode 100644 index 59fa3262a..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/select-a-cluster.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.PNG b/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.PNG deleted file mode 100644 index 80b8aa171..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/cluster-visibility-and-authorization/workspace-list.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-es-as-receiver/add-es.png b/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-es-as-receiver/add-es.png deleted file mode 100644 index 6391ce191..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-es-as-receiver/add-es.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/add-fluentd.png b/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/add-fluentd.png deleted file mode 100644 index a6d54c54d..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/add-fluentd.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/container-logs.png b/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/container-logs.png deleted file mode 100644 index 5d0f50430..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-fluentd-as-receiver/container-logs.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-kafka-as-receiver/add-kafka.png 
b/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-kafka-as-receiver/add-kafka.png deleted file mode 100644 index 82eee8c09..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-settings/log-collection/add-kafka-as-receiver/add-kafka.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png deleted file mode 100644 index f7778569b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/apiserver-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-nodes.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-nodes.png deleted file mode 100644 index f484462ee..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-nodes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png deleted file mode 100644 index 7f6334ce0..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-resources-usage.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png deleted file mode 100644 index 3c07d7fe1..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cluster-status-monitoring.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/clusters-management.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/clusters-management.png deleted file mode 100644 index b07db0ae8..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/clusters-management.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/component-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/component-monitoring.png deleted file mode 100644 index 14bfc3c82..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/component-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-load-average.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-load-average.png deleted file mode 100644 index 9279a9433..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-load-average.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-utilization.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-utilization.png deleted file mode 100644 index 31987e09b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/cpu-utilization.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-throughput.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-throughput.png deleted file mode 100644 index 11fd6c869..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-throughput.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-usage.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-usage.png deleted file mode 100644 index 6ba10c29b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/disk-usage.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/etcd-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/etcd-monitoring.png deleted file mode 100644 index c599fbeb5..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/etcd-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/inode-utilization.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/inode-utilization.png deleted file mode 100644 index 8c594d013..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/inode-utilization.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/iops.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/iops.png deleted file mode 100644 index 6da5cc945..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/iops.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/memory-utilization.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/memory-utilization.png deleted file mode 100644 index df1320784..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/memory-utilization.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/monitoring.png 
b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/monitoring.png deleted file mode 100644 index 5f03690f8..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/netework-bandwidth.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/netework-bandwidth.png deleted file mode 100644 index fbafd0a8f..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/netework-bandwidth.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/node-usage-ranking.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/node-usage-ranking.png deleted file mode 100644 index 9b1034943..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/node-usage-ranking.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png deleted file mode 100644 index 987af242d..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/physical-resources-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/pod-status.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/pod-status.png deleted file mode 100644 index 8e2b1e148..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/pod-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/running-status.png 
b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/running-status.png deleted file mode 100644 index 88411f431..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/running-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitorin.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitorin.png deleted file mode 100644 index d315a3e67..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitorin.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png deleted file mode 100644 index ea30ea057..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/scheduler-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/service-components-status.png b/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/service-components-status.png deleted file mode 100644 index ec8c386c6..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-status-monitoring/service-components-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alert-message-page.png b/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alert-message-page.png deleted file mode 100644 index b8d1db029..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alert-message-page.png and /dev/null 
differ diff --git a/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting-policy-details-page.png b/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting-policy-details-page.png deleted file mode 100644 index d9b1305d1..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting-policy-details-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/Node-Detail.png b/static/images/docs/zh-cn/cluster-administration/node-management/Node-Detail.png deleted file mode 100644 index 3fc136b3d..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/Node-Detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/Node-Status.png b/static/images/docs/zh-cn/cluster-administration/node-management/Node-Status.png deleted file mode 100644 index 9bf7af5af..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/Node-Status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/add-taint.png b/static/images/docs/zh-cn/cluster-administration/node-management/add-taint.png deleted file mode 100644 index 5cefd9bf6..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/add-taint.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/assign_pods_to_node1.png b/static/images/docs/zh-cn/cluster-administration/node-management/assign_pods_to_node1.png deleted file mode 100644 index 5fbae73bd..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/assign_pods_to_node1.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/node-management/cluster-management1.png b/static/images/docs/zh-cn/cluster-administration/node-management/cluster-management1.png deleted file mode 100644 index a774e78d0..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/cluster-management1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/clusters-select.png b/static/images/docs/zh-cn/cluster-administration/node-management/clusters-select.png deleted file mode 100644 index 6cb90bf04..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/clusters-select.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/label-node.png b/static/images/docs/zh-cn/cluster-administration/node-management/label-node.png deleted file mode 100644 index 317100036..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/label-node.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/manage-taint.png b/static/images/docs/zh-cn/cluster-administration/node-management/manage-taint.png deleted file mode 100644 index 85f8c9eb5..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/manage-taint.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/node-management/node-drop-down-list.png b/static/images/docs/zh-cn/cluster-administration/node-management/node-drop-down-list.png deleted file mode 100644 index 5cbf81670..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/node-management/node-drop-down-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-settings.PNG 
b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-settings.PNG deleted file mode 100644 index f7d031104..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-storage-system.PNG b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-storage-system.PNG deleted file mode 100644 index e38c03126..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/create-storage-class-storage-system.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/custom-storage-class.PNG b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/custom-storage-class.PNG deleted file mode 100644 index f254bab6f..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/custom-storage-class.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-class.PNG b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-class.PNG deleted file mode 100644 index 8c7fd7e17..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-class.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-system.PNG b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-system.PNG deleted file mode 100644 index 0d4c7a8f8..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-system.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-volume-qingcloud.PNG b/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-volume-qingcloud.PNG deleted file mode 100644 index af398b635..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/persistent-volumes-and-storage-classes/storage-volume-qingcloud.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/access-token.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/access-token.png deleted file mode 100644 index c490caf87..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/access-token.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/activate-chat-access.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/activate-chat-access.png deleted file mode 100644 index 05936c02d..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/activate-chat-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-info.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-info.png deleted file mode 100644 index d70b28171..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-page.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-page.png deleted file mode 100644 index 9316f83ba..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/app-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/application-form.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/application-form.png deleted file mode 100644 index 2d2d873be..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/application-form.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/apply-for-access.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/apply-for-access.png deleted file mode 100644 index bfe502307..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/apply-for-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/chat-id.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/chat-id.png deleted file mode 100644 index 95816a574..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/chat-id.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-add.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-add.png deleted file mode 100644 index 77d0fd49c..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-add.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-custom.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-custom.png deleted file mode 100644 index 97120b6d3..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-custom.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-robot.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-robot.png deleted file mode 100644 index 6860a87a4..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/click-robot.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-app.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-app.png deleted file mode 100644 index 548799005..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-robot.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-robot.png deleted file mode 100644 index ccc81daea..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/create-robot.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dev-mgt.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dev-mgt.png deleted file mode 100644 index b5f5c23fb..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dev-mgt.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configurations.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configurations.png deleted file mode 100644 index 69248ef78..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configurations.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configured.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configured.png deleted file mode 100644 index d9fdd5949..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/dingtalk-configured.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/enter-public-ip.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/enter-public-ip.png deleted file mode 100644 index cca0b6e4c..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/enter-public-ip.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/robot-configs.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/robot-configs.png deleted file mode 100644 index 92be082b8..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/robot-configs.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/user-id.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/user-id.png deleted file mode 100644 index 7a43aebf4..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/user-id.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/userid-phone.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/userid-phone.png deleted file mode 100644 index 2f2d3807e..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/userid-phone.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/view-robot.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/view-robot.png deleted file mode 100644 index 6cc25a002..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-dingtalk/view-robot.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/email-server.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/email-server.png deleted file mode 100644 index edad2f2b7..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/email-server.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png deleted file mode 100644 index 6de2eed92..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-email/example-email-notification.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png deleted file mode 100644 index dcf5b45b2..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/add-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/example-notification1.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/example-notification1.png deleted file mode 100644 index 3caaecb8a..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/example-notification1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png deleted file mode 100644 index 8f688810d..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/oauth-token.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png deleted file mode 100644 index 038c26edc..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-notification.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png deleted file mode 100644 index a9f40b107..000000000 Binary files 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-slack/slack-scope.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-dept.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-dept.png deleted file mode 100644 index ea73a3f23..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-dept.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member-to-tag.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member-to-tag.png deleted file mode 100644 index 91124fc13..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member-to-tag.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member.png deleted file mode 100644 index 757248772..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-member.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-tag.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-tag.png deleted file mode 100644 index 7522709ec..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/add-tag.png and 
/dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-created.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-created.png deleted file mode 100644 index 5e571d0e1..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-created.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-detail.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-detail.png deleted file mode 100644 index c8f157085..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/app-detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/click-create-app.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/click-create-app.png deleted file mode 100644 index 4aef0f2b0..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/click-create-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/company-id.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/company-id.png deleted file mode 100644 index 29c147156..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/company-id.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-app.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-app.png deleted file mode 100644 index ee9c32347..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-tag.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-tag.png deleted file mode 100644 index 3520deaa2..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/create-tag.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/dept-id.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/dept-id.png deleted file mode 100644 index ae0e0fdc1..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/dept-id.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/enter-dept-name.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/enter-dept-name.png deleted file mode 100644 index 3f864263c..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/enter-dept-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/member-account.png 
b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/member-account.png deleted file mode 100644 index 5eb20621a..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/member-account.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/platform.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/platform.png deleted file mode 100644 index 47f1d9bc8..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/platform.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/set-access.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/set-access.png deleted file mode 100644 index 8aec62d4b..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/set-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/tag-id.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/tag-id.png deleted file mode 100644 index 30fac0444..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/tag-id.png and /dev/null differ diff --git a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/view-secret.png b/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/view-secret.png deleted file mode 
100644 index 9ec462ca2..000000000 Binary files a/static/images/docs/zh-cn/cluster-administration/platform-settings/notification-management/configure-wecom/view-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-dockerhub_id.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-dockerhub_id.png deleted file mode 100644 index 888fa295c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-dockerhub_id.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-kubeconfig.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-kubeconfig.PNG deleted file mode 100644 index 733ba768c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-kubeconfig.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline-2.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline-2.PNG deleted file mode 100644 index 6dde6646a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline.PNG deleted file mode 100644 index 25d3dce92..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/create-pipeline.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/credential-docker_create.png 
b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/credential-docker_create.png deleted file mode 100644 index 93f24fd09..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/credential-docker_create.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-1.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-1.PNG deleted file mode 100644 index 0bf98d50d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-2.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-2.PNG deleted file mode 100644 index 3a0b6d16a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/docker-image-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-create-token.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-create-token.PNG deleted file mode 100644 index d418b4e5f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-create-token.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-settings.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-settings.PNG deleted file mode 100644 index 7f8b8eb21..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-settings.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-copy.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-copy.PNG deleted file mode 100644 index b8a2afa95..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-copy.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-ok.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-ok.PNG deleted file mode 100644 index 8d69ad604..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/dockerhub-token-ok.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/edit_jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/edit_jenkinsfile.png deleted file mode 100644 index d8a93ddde..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/edit_jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/pipeline_running.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/pipeline_running.png deleted file mode 100644 index aec0aa170..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/pipeline_running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/run_pipeline.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/run_pipeline.png deleted file mode 100644 index 5fa051de9..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/run_pipeline.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/set-pipeline-name.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/set-pipeline-name.PNG deleted file mode 100644 index 5f80ba976..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/set-pipeline-name.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/view_deployment.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/view_deployment.png deleted file mode 100644 index 835682419..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-a-go-project/view_deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/edit_jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/edit_jenkinsfile.png deleted file mode 100644 index 0a71a3c7a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/edit_jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/run-maven_pipeline.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/run-maven_pipeline.png deleted file mode 100644 index 20cedf49b..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/run-maven_pipeline.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-credential_lists.png 
b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-credential_lists.png deleted file mode 100644 index e7721e5d5..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-credential_lists.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-edit_jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-edit_jenkinsfile.png deleted file mode 100644 index 0b33991f4..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-edit_jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload_svc.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload_svc.png deleted file mode 100644 index c21ca4995..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload_svc.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_pipeline.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_pipeline.png deleted file mode 100644 index 362bc6494..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_pipeline.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_workload.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_workload.png deleted file mode 100644 index aaa086354..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven_workload.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view_namespace.png b/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view_namespace.png deleted file mode 100644 index 3f6a0edac..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view_namespace.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-dockerhub-id-1.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-dockerhub-id-1.png deleted file mode 100644 index bd3e5a1db..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-dockerhub-id-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-kubeconfig.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-kubeconfig.PNG deleted file mode 100644 index 277cef178..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-kubeconfig.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-pipeline_2.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-pipeline_2.png deleted file mode 100644 index be8aef7cd..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create-pipeline_2.png 
and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create_pipeline.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create_pipeline.png deleted file mode 100644 index 2ab6b39d1..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/create_pipeline.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/credential-docker.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/credential-docker.png deleted file mode 100644 index b70c2e753..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/credential-docker.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-create-token.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-create-token.PNG deleted file mode 100644 index 796d6f8ef..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-create-token.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-settings.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-settings.PNG deleted file mode 100644 index a3fe857a6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-settings.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-copy.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-copy.PNG deleted file mode 100644 index 5aaa77737..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-copy.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-ok.PNG b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-ok.PNG deleted file mode 100644 index 4741ed613..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/dockerhub-token-ok.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/edit_jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/edit_jenkinsfile.png deleted file mode 100644 index a65a05c1d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/edit_jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/multi-cluster_ok.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/multi-cluster_ok.png deleted file mode 100644 index 2602d0793..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/multi-cluster_ok.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/set-pipeline_name.png b/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/set-pipeline_name.png deleted file mode 100644 index 4eff8c84a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/set-pipeline_name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png deleted file mode 100644 index 96ece92ba..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png deleted file mode 100644 index b4da4cd3a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit-yaml.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png deleted file mode 100644 index 2229a5403..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-edit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png deleted file mode 100644 index e7565dc40..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-pom.png and /dev/null 
differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png deleted file mode 100644 index 392496b0a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/click-run.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png deleted file mode 100644 index b2099c18f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/enter-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png deleted file mode 100644 index 52f083256..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/ks-devops-agent.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png deleted file mode 100644 index 90fddb7c4..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/maven-public-url.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png deleted file mode 100644 index b5b0982dc..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-logs.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png deleted file mode 100644 index 0cd79d1fa..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/pipeline-success.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png deleted file mode 100644 index 83a048a85..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/repo-type.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png b/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png deleted file mode 100644 index 60911ad80..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/examples/use-nexus-in-pipeline/set-pipeline-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings.PNG b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings.PNG deleted file mode 100644 index 09cbc1887..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings1.png deleted file mode 100644 index 632a60188..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info.png deleted file mode 100644 index 2c31b7bcf..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info1.png deleted file mode 100644 index 659c8a58d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/create-credentials.PNG b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/create-credentials.PNG deleted file mode 100644 index 333bc960d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/create-credentials.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page.png deleted file mode 100644 index 33d87eb19..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page2.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page2.png deleted file mode 100644 index 9fa6f4930..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile.PNG b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile.PNG deleted file mode 100644 index c557318c6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile1.png deleted file mode 100644 index 674d76895..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file.png deleted file mode 100644 index ac975f63d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file1.png 
deleted file mode 100644 index 8dd0fac03..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects.png deleted file mode 100644 index 2dd1b5c35..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects1.png deleted file mode 100644 index a5be3423c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name.png deleted file mode 100644 index c68ac8d65..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name1.png deleted file mode 100644 index 12e070e2c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name1.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account.png deleted file mode 100644 index b34d0dd11..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account1.png deleted file mode 100644 index 3b3c0b0cf..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name.png deleted file mode 100644 index 25502716e..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name1.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name1.png deleted file mode 100644 index 8ece4a1f4..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/access-sonarqube-console.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/access-sonarqube-console.png deleted file mode 100644 index 
de9cd08d8..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/access-sonarqube-console.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-sonarqube.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-sonarqube.png deleted file mode 100644 index a2db82dc2..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-sonarqube.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/code-analysis.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/code-analysis.png deleted file mode 100644 index 50ddd94c5..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/code-analysis.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/configure-system.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/configure-system.png deleted file mode 100644 index 0f2f7ed54..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/configure-system.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-login-page.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-login-page.png deleted file mode 100644 index dbcfd9a33..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-login-page.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/log-in-page.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/log-in-page.png deleted file mode 100644 index e7c035888..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/log-in-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/manage-jenkins.png b/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/manage-jenkins.png deleted file mode 100644 index b6410d12e..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops-2.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops-2.png deleted file mode 100644 index a933b797c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page-4.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page-4.png deleted file mode 100644 index d1ffed670..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page-4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list-3.png 
b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list-3.png deleted file mode 100644 index 001a6914c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list-3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create-1.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create-1.png deleted file mode 100644 index f9a192ca0..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info-5.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info-5.png deleted file mode 100644 index 7c7b5fe53..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info-5.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list-1.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list-1.png deleted file mode 100644 index cd0008137..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/pipeline-list-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.PNG 
b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.PNG deleted file mode 100644 index 2ee756ab8..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/overview/sonarqube-result-detail.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-invite-member_4.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-invite-member_4.png deleted file mode 100644 index ca048028c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-invite-member_4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-list_3.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-list_3.png deleted file mode 100644 index 976dd917e..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-list_3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_1.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_1.png deleted file mode 100644 index a1499bf9d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_2.png 
b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_2.png deleted file mode 100644 index 30ef2c3f7..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step_2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-user-edit_5.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-user-edit_5.png deleted file mode 100644 index 140ff7598..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-user-edit_5.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png b/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png deleted file mode 100644 index 37c20ebd2..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/three-dots.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.PNG deleted file mode 100644 index 128f815b4..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/activity-failure.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/activity-failure.PNG 
deleted file mode 100644 index 6cd563c62..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/activity-failure.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/advanced-settings.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/advanced-settings.PNG deleted file mode 100644 index fb1bd1a40..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/advanced-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.PNG deleted file mode 100644 index 225df14d4..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.PNG deleted file mode 100644 index e5a83523a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes1.png deleted file mode 100644 index 7a57c9963..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes1.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.PNG deleted file mode 100644 index 68c853111..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.PNG deleted file mode 100644 index 48a74482d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.PNG deleted file mode 100644 index f4a360205..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list1.png deleted file mode 100644 index 539773f14..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.PNG deleted file 
mode 100644 index c50c2d8bc..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.PNG deleted file mode 100644 index b5b2fdb45..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.PNG deleted file mode 100644 index 5dc46c69f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo1.png deleted file mode 100644 index c3b77d2fa..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.PNG deleted file mode 100644 index d8b582251..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.PNG deleted file mode 100644 index a16472401..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-result.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-result.PNG deleted file mode 100644 index 1019904b7..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-result.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.PNG deleted file mode 100644 index ef2ce827f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.PNG deleted file mode 100644 index 5f4e180ec..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit--1.png 
b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit--1.png deleted file mode 100644 index 6ebcc09b8..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit--1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.PNG deleted file mode 100644 index cdc5c2aeb..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.PNG deleted file mode 100644 index b7a04ec16..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.PNG deleted file mode 100644 index 3496ca8f9..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.PNG deleted file mode 100644 index 3bd654dac..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.PNG deleted file mode 100644 index 32681bc88..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.PNG deleted file mode 100644 index 48fa60501..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.PNG deleted file mode 100644 index 8e0fb75b6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/project-list.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/project-list.PNG deleted file mode 100644 index a92b5a7ff..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/project-list.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.PNG deleted file mode 100644 index 88561917d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy1.png deleted file mode 100644 index a39a5caeb..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sample-app-result-check.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sample-app-result-check.PNG deleted file mode 100644 index 0f0e4888c..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sample-app-result-check.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select-token1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select-token1.png deleted file mode 100644 index e95688371..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select-token1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select_repo.png 
b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select_repo.png deleted file mode 100644 index 1891255d0..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select_repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.PNG deleted file mode 100644 index 8f2d08954..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail-1.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail-1.PNG deleted file mode 100644 index b1bea115a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.PNG deleted file mode 100644 index 09183b0aa..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.PNG deleted file mode 100644 index 4ecf7f5aa..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/access_service.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/access_service.png deleted file mode 100644 index 2b3e419c3..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/access_service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/completed.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/completed.png deleted file mode 100644 index 2aa0f6f9a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/completed.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/inspect_logs.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/inspect_logs.png deleted file mode 100644 index f36c57155..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/inspect_logs.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/kubernetesDeploy_set.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/kubernetesDeploy_set.png deleted file mode 100644 index 2d5986267..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/kubernetesDeploy_set.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_done.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_done.png deleted file mode 100644 index 7bc1e317f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/pipeline_done.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/service_exposed.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/service_exposed.png deleted file mode 100644 index 75b8fd250..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/service_exposed.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_set.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_set.png deleted file mode 100644 index a38cc73ac..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/shell_set.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/unit_test_set.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/unit_test_set.png deleted file mode 100644 index f6aa50510..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/unit_test_set.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/view_deployment.png 
b/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/view_deployment.png deleted file mode 100644 index d2f37682a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-graphical-editing-panel/view_deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-step1.PNG b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-step1.PNG deleted file mode 100644 index c376bdf94..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-step1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential_page.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential_page.png deleted file mode 100644 index 5eae33caf..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential_page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-detail_page.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-detail_page.png deleted file mode 100644 index 165df6ddb..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-detail_page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential_list.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential_list.png deleted file mode 100644 index d8849608a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential_list.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/dockerhub_credentials.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/dockerhub_credentials.png deleted file mode 100644 index bbc33c9c0..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/dockerhub_credentials.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/edit_credentials.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/edit_credentials.png deleted file mode 100644 index 25c861eab..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/edit_credentials.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/check-log.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/check-log.png deleted file mode 100644 index a43bd32f3..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/check-log.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-import-project.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-import-project.png deleted file mode 100644 index 9d69ba821..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-import-project.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-run.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-run.png deleted file mode 100644 index c0212f851..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/click-run.png and /dev/null differ 
diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/commit-changes.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/commit-changes.png deleted file mode 100644 index b710a03cc..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/commit-changes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/create-pipeline.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/create-pipeline.png deleted file mode 100644 index 8659ff0ac..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/create-pipeline.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/credential-created.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/credential-created.png deleted file mode 100644 index 9eb3964f6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/credential-created.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/deployment.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/deployment.png deleted file mode 100644 index 55ef9074f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/docker-image.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/docker-image.png deleted file mode 100644 index 0ad871ac7..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/docker-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-demo.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-demo.png deleted file mode 100644 index e61979fb5..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-demo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-result.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-result.png deleted file mode 100644 index 6cf758c50..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/gitlab-result.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png deleted file mode 100644 index c3b5caef1..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/jenkinsfile-online.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/new-branch.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/new-branch.png deleted file mode 100644 index 59f4fa063..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/new-branch.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/pipeline-logs.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/pipeline-logs.png deleted file mode 100644 index 
479a2357d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/pipeline-logs.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-branch.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-branch.png deleted file mode 100644 index c610e6a5d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-branch.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-gitlab.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-gitlab.png deleted file mode 100644 index a9b210a75..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/select-gitlab.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/service.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/service.png deleted file mode 100644 index 796d25121..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/use-git-url.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/use-git-url.png deleted file mode 100644 index c537f9c7a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/gitlab-multibranch-pipeline/use-git-url.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-add.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-add.png deleted file mode 100644 index 7874fe18e..000000000 
Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-add.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-configure.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-configure.png deleted file mode 100644 index ac5d585b9..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-configure.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-create.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-create.png deleted file mode 100644 index a240fc512..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-create.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-run.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-run.png deleted file mode 100644 index 3c4ec0659..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/click-run.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/configure-shared-library.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/configure-shared-library.png deleted file mode 100644 index 8adbcfe2a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/configure-shared-library.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/edit-jenkinsfile.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/edit-jenkinsfile.png deleted file mode 100644 index 5f7e0b6c0..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/edit-jenkinsfile.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/log-details.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/log-details.png deleted file mode 100644 index 042dcbdea..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/log-details.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/run-successfully.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/run-successfully.png deleted file mode 100644 index 2cab18128..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/run-successfully.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/set-name.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/set-name.png deleted file mode 100644 index 0cdcb5b83..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-shared-library/set-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/apply-config.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/apply-config.png deleted file mode 100644 index 0b4893baa..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/apply-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/configuration-as-code.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/configuration-as-code.png deleted file mode 100644 index 2ed71a02d..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/configuration-as-code.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-configmap.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-configmap.png deleted file mode 100644 index c8857a1c8..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-configmap.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-jenkins.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-jenkins.png deleted file mode 100644 index 6dc432445..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-jenkins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/jenkins-dashboard.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/jenkins-dashboard.png deleted file mode 100644 index 0cfb44a1b..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/jenkins-dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/manage-jenkins.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/manage-jenkins.png deleted file mode 100644 index c00f15fd6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/more-list.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/more-list.png deleted file mode 100644 index 604e9b9ba..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/more-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/basic-info-tab1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/basic-info-tab1.png deleted file mode 100644 index 5544217a6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/basic-info-tab1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/behavioral-strategy1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/behavioral-strategy1.png deleted file mode 100644 index 7abfd313f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/behavioral-strategy1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/branch-settings1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/branch-settings1.png deleted file mode 100644 index c54b52091..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/branch-settings1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-settings1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-settings1.png deleted file mode 100644 index 8bf5d6024..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-settings1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger--2.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger--2.png deleted file mode 100644 index 8aa268624..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger--2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger1.png deleted file mode 100644 index 98911133f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/build-trigger1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-bitbucket1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-bitbucket1.png deleted file mode 100644 index 5dfbcdabe..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-bitbucket1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-git1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-git1.png deleted file mode 100644 index 5b87716e9..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-git1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-github1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-github1.png deleted file mode 100644 index 5a733d9e3..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-github1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-gitlab1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-gitlab1.png deleted file mode 100644 index bd6559ea9..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-gitlab1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-svn1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-svn1.png deleted file mode 100644 index ec0695777..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/code-source-svn1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/git-clone-options1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/git-clone-options1.png deleted file mode 100644 index 68032480f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/git-clone-options1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/parametric-build1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/parametric-build1.png deleted file mode 100644 index cbd6f7e48..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/parametric-build1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/scan-repo-trigger1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/scan-repo-trigger1.png deleted file mode 100644 index af84ba8cc..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/scan-repo-trigger1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/script-path1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/script-path1.png deleted file mode 100644 index fd09b19aa..000000000 Binary files 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/script-path1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/webhook-push1.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/webhook-push1.png deleted file mode 100644 index f1a8ae97e..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-settings/webhook-push1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/add-webhook.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/add-webhook.png deleted file mode 100644 index df5fb562f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/add-webhook.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-add-webhook.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-add-webhook.png deleted file mode 100644 index 84f111c32..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-add-webhook.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-file.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-file.png deleted file mode 100644 index 4c26dba5a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-sonar.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-sonar.png deleted file mode 100644 index 8acdabad6..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/click-sonar.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/delivery-detail.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/delivery-detail.png deleted file mode 100644 index 7bf47c85f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/delivery-detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-config.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-config.png deleted file mode 100644 index 1082f9262..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-file.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-file.png deleted file mode 100644 index 5a817c061..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/edit-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pipeline-triggered.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pipeline-triggered.png deleted file mode 100644 index 97531e744..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pipeline-triggered.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pods.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pods.png deleted file mode 100644 index c7031fb7d..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/pods.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-push.png 
b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-push.png deleted file mode 100644 index 39197d5f1..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-push.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-ready.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-ready.png deleted file mode 100644 index 35b3eb533..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/pipeline-webhook/webhook-ready.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-ci-label.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-ci-label.png deleted file mode 100644 index 47e076e50..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-ci-label.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-taint.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-taint.png deleted file mode 100644 index a5b319e05..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-taint.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/node-management.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/node-management.png deleted file mode 100644 index 681275c5f..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/node-management.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-ci-node.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-ci-node.png deleted file mode 100644 index 654e72868..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-ci-node.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-taint-management.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-taint-management.png deleted file mode 100644 index 9c113db8a..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-taint-management.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/taint-result.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/taint-result.png deleted file mode 100644 index 010a36118..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/taint-result.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png index 604ab1226..590a50b90 100644 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png and b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/three-dots.png 
b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/three-dots.png deleted file mode 100644 index bd841635e..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/three-dots.png and /dev/null differ diff --git a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/workloads_list.png b/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/workloads_list.png deleted file mode 100644 index e265aadaa..000000000 Binary files a/static/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/workloads_list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubeedge/edge-nodes.png b/static/images/docs/zh-cn/enable-pluggable-components/kubeedge/edge-nodes.png deleted file mode 100644 index 2543d653e..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubeedge/edge-nodes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-alerting/alerting-section.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-alerting/alerting-section.png deleted file mode 100644 index 05acc3b8c..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-alerting/alerting-section.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-app-store/app-store-page.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-app-store/app-store-page.png deleted file mode 100644 index 7e8cba299..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-app-store/app-store-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png 
b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png deleted file mode 100644 index 0d061d4e2..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-auditing-logs/auditing-operating.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-devops-system/devops.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-devops-system/devops.png deleted file mode 100644 index 29ee96b15..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-devops-system/devops.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-events/event-search.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-events/event-search.png deleted file mode 100644 index e19d1b738..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-events/event-search.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-logging-system/logging.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-logging-system/logging.png deleted file mode 100644 index 49e1b283d..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-logging-system/logging.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-service-mesh/istio.png b/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-service-mesh/istio.png deleted file mode 100644 index 45fe547bf..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/kubesphere-service-mesh/istio.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/network-policies/network-policy.PNG b/static/images/docs/zh-cn/enable-pluggable-components/network-policies/network-policy.PNG deleted file mode 100644 index 
d87eea1e6..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/network-policies/network-policy.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png b/static/images/docs/zh-cn/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png deleted file mode 100644 index 4e0b1173a..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/pod-ip-pools/pod-ip-pool.png and /dev/null differ diff --git a/static/images/docs/zh-cn/enable-pluggable-components/service-topology/topology.png b/static/images/docs/zh-cn/enable-pluggable-components/service-topology/topology.png deleted file mode 100644 index 2efaa0ecc..000000000 Binary files a/static/images/docs/zh-cn/enable-pluggable-components/service-topology/topology.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/assign-workspace.PNG b/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/assign-workspace.PNG deleted file mode 100644 index ce291fc80..000000000 Binary files a/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/assign-workspace.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/project-page.PNG b/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/project-page.PNG deleted file mode 100644 index ff8d99c19..000000000 Binary files a/static/images/docs/zh-cn/faq/access-control-and-account-management/add-kubernetes-namespace-to-kubesphere-workspace/project-page.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/activate-tomcat.png 
b/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/activate-tomcat.png deleted file mode 100644 index 1ec6ca353..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/activate-tomcat.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/click-tomcat.png b/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/click-tomcat.png deleted file mode 100644 index b243bf9a6..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/click-tomcat.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/confirm-suspend.png b/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/confirm-suspend.png deleted file mode 100644 index f069eefad..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/confirm-suspend.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/suspend-tomcat.png b/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/suspend-tomcat.png deleted file mode 100644 index ddc940786..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/remove-built-in-apps/suspend-tomcat.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/confirm-delete.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/confirm-delete.png deleted file mode 100644 index 31e347105..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/confirm-delete.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-redis-1.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-redis-1.png deleted file mode 100644 index cadf16856..000000000 Binary files 
a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-redis-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-secret.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-secret.png deleted file mode 100644 index 70fa11d0d..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/delete-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/error-prompt.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/error-prompt.png deleted file mode 100644 index 8e018cf32..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/error-prompt.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/new-redis-app.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/new-redis-app.png deleted file mode 100644 index 5f7d706a6..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/new-redis-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/redis-1.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/redis-1.png deleted file mode 100644 index d969da01c..000000000 Binary files a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/redis-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/search-secret.png b/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/search-secret.png deleted file mode 100644 index fffd8a5f2..000000000 Binary files 
a/static/images/docs/zh-cn/faq/applications/reuse-the-same-app-name-after-deletion/search-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png b/static/images/docs/zh-cn/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png deleted file mode 100644 index df51d13a6..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/create-devops-kubeconfig-on-aws/create-kubeconfig.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/available-plugins.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/available-plugins.png deleted file mode 100644 index 997fd980e..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/available-plugins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png deleted file mode 100644 index dba60b404..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-advanced-tab.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png deleted file mode 100644 index 4b56afb07..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-jenkins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png deleted file mode 100644 index 68a285533..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/click-manage-plugins.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/installed-plugins.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/installed-plugins.png deleted file mode 100644 index 2dfafb7ff..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/installed-plugins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/update-plugins.png b/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/update-plugins.png deleted file mode 100644 index cae93cb47..000000000 Binary files a/static/images/docs/zh-cn/faq/devops/install-plugins-to-jenkins/update-plugins.png and /dev/null differ diff --git a/static/images/docs/zh-cn/faq/forgot-password/modify-password.png b/static/images/docs/zh-cn/faq/forgot-password/modify-password.png deleted file mode 100644 index 6ed1f83e3..000000000 Binary files a/static/images/docs/zh-cn/faq/forgot-password/modify-password.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-UI.png b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-UI.png deleted file mode 100644 index ecd2a0cce..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-UI.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-console.png b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-console.png deleted file mode 100644 index b0af1b308..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-ack/ks-console.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-cluster.png 
b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-cluster.png deleted file mode 100644 index 0f8e1640b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/aks-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/doks-cluster.png b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/doks-cluster.png deleted file mode 100644 index 1715e2c1b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/doks-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/gke-cluster.png b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/gke-cluster.png deleted file mode 100644 index 8c1180ce4..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/gke-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/gke-cluster.png b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/gke-cluster.png deleted file mode 100644 index 5e905207b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/gke-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/概览页面.jpg b/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/概览页面.jpg deleted file mode 100644 index c0887721f..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/概览页面.jpg 
and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/installing-on-on-premises-kubernetes/air-gapped-installation/kubesphere-login.PNG b/static/images/docs/zh-cn/installing-on-kubernetes/installing-on-on-premises-kubernetes/air-gapped-installation/kubesphere-login.PNG deleted file mode 100644 index 198ffc63b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/installing-on-on-premises-kubernetes/air-gapped-installation/kubesphere-login.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-kubernetes/introduction/login.png b/static/images/docs/zh-cn/installing-on-kubernetes/introduction/login.png deleted file mode 100644 index 50dc681f5..000000000 Binary files a/static/images/docs/zh-cn/installing-on-kubernetes/introduction/login.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-2.png b/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-2.png deleted file mode 100644 index ca4c729eb..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node1.png b/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node1.png deleted file mode 100644 index a2eda95b8..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/add-edge-nodes/edge-node1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png b/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png deleted file mode 100644 index ef572bc82..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/add-and-delete-nodes/delete-nodes/cordon.png and /dev/null differ 
diff --git a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/booster-url.PNG b/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/booster-url.PNG deleted file mode 100644 index 0e51f2b5b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/booster-url.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/container-registry.PNG b/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/container-registry.PNG deleted file mode 100644 index 717c5522c..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/container-registry.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/image-booster.PNG b/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/image-booster.PNG deleted file mode 100644 index 30e84dcbd..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/faq/configure-booster/image-booster.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png b/static/images/docs/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png deleted file mode 100644 index a0ea16114..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/installing-on-public-cloud/deploy-kubesphere-on-qingcloud-instances/cluster-node.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/introduction/air-gapped-installation/kubesphere-login.PNG b/static/images/docs/zh-cn/installing-on-linux/introduction/air-gapped-installation/kubesphere-login.PNG deleted file mode 100644 index aa6d8991c..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/introduction/air-gapped-installation/kubesphere-login.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png b/static/images/docs/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png new file mode 100644 index 000000000..cafe6ad34 Binary files /dev/null and b/static/images/docs/zh-cn/installing-on-linux/introduction/internal-ha-configuration/internalLoadBalancer.png differ diff --git a/static/images/docs/zh-cn/installing-on-linux/introduction/multi-node-installation/login.PNG b/static/images/docs/zh-cn/installing-on-linux/introduction/multi-node-installation/login.PNG deleted file mode 100644 index aa6d8991c..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/introduction/multi-node-installation/login.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/on-premises/cluster-management.png b/static/images/docs/zh-cn/installing-on-linux/on-premises/cluster-management.png deleted file mode 100644 index 818b28da4..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/on-premises/cluster-management.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/on-premises/service-components.png b/static/images/docs/zh-cn/installing-on-linux/on-premises/service-components.png deleted file mode 100644 index 1bfe61e7e..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/on-premises/service-components.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png deleted file mode 100644 index 73e22a9ae..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/storage-class-available.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png deleted file mode 100644 index 0b7d12f3b..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/glusterfs-client/volumes-in-use.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png deleted file mode 100644 index 4a30f76d9..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-pod.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png deleted file mode 100644 index 7a8e5916e..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/nfs-client/nfs-storage-class.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png deleted file mode 100644 index 67d1b10bc..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-pod.png and /dev/null differ diff --git a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png b/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png 
deleted file mode 100644 index 8ae206f69..000000000 Binary files a/static/images/docs/zh-cn/installing-on-linux/persistent-storage-configurations/qingcloud-csi/qingcloud-csi-storage-class.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.PNG deleted file mode 100644 index 5fbfaf9cf..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/add-cluster.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.PNG deleted file mode 100644 index 410848ca0..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-imported.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.PNG deleted file mode 100644 index a812a5e14..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/cluster-info.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png deleted file mode 
100644 index a2a2c80ec..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/agent-connection/select-agent-connection.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.PNG deleted file mode 100644 index 5fbfaf9cf..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/add-cluster.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.PNG deleted file mode 100644 index 410848ca0..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-imported.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.PNG deleted file mode 100644 index a812a5e14..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/cluster-info.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.PNG b/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.PNG deleted file mode 100644 index 
b7342b113..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/enable-multicluster-management-in-kubesphere/direct-connection/kubeconfig.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png deleted file mode 100644 index 8691f7c4f..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/ack-cluster-imported.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png deleted file mode 100644 index f0657c17f..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png deleted file mode 100644 index 12b775006..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/click-edit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png deleted file mode 100644 index 0bc4bb2f1..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/input-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png 
b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png deleted file mode 100644 index 8242d4fad..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/search-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png deleted file mode 100644 index aa427d8af..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-ack/select-method.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png deleted file mode 100644 index 8e52e3abe..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png deleted file mode 100644 index 8c2259b96..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/click-edit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png deleted file mode 100644 index ec9319f6c..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-kubeconfig.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png deleted file mode 100644 index 646d7efac..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/eks-overview.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png deleted file mode 100644 index 1c1e8d0b3..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/input-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png deleted file mode 100644 index 033502579..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-eks/search-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png deleted file mode 100644 index 101aa08b3..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-add-cluster.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png deleted file mode 100644 index b66f0f276..000000000 Binary files 
a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/click-edit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png deleted file mode 100644 index 36c15c9cb..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/gke-cluster-imported.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png deleted file mode 100644 index b6d68045b..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/input-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png deleted file mode 100644 index 452f4f4e3..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/search-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png b/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png deleted file mode 100644 index a57404ac1..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/import-cloud-hosted-k8s/import-gke/select-method.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/introduction/kubesphere-federation/central-control-plane.png 
b/static/images/docs/zh-cn/multicluster-management/introduction/kubesphere-federation/central-control-plane.png deleted file mode 100644 index d76a1516b..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/introduction/kubesphere-federation/central-control-plane.png and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/cluster-management.PNG b/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/cluster-management.PNG deleted file mode 100644 index db4a222c7..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/cluster-management.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/unbind-cluster.PNG b/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/unbind-cluster.PNG deleted file mode 100644 index e0db71580..000000000 Binary files a/static/images/docs/zh-cn/multicluster-management/unbind-a-cluster/unbind-cluster.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/container-limit-ranges/change-limit-ranges.PNG b/static/images/docs/zh-cn/project-administration/container-limit-ranges/change-limit-ranges.PNG deleted file mode 100644 index 8683ae252..000000000 Binary files a/static/images/docs/zh-cn/project-administration/container-limit-ranges/change-limit-ranges.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/container-limit-ranges/default-limit-range.PNG b/static/images/docs/zh-cn/project-administration/container-limit-ranges/default-limit-range.PNG deleted file mode 100644 index 60ba0ab0d..000000000 Binary files a/static/images/docs/zh-cn/project-administration/container-limit-ranges/default-limit-range.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/container-limit-ranges/view-limit-ranges.PNG 
b/static/images/docs/zh-cn/project-administration/container-limit-ranges/view-limit-ranges.PNG deleted file mode 100644 index 21822b065..000000000 Binary files a/static/images/docs/zh-cn/project-administration/container-limit-ranges/view-limit-ranges.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/container-limit-ranges/workload-values.PNG b/static/images/docs/zh-cn/project-administration/container-limit-ranges/workload-values.PNG deleted file mode 100644 index 4c18b3ef1..000000000 Binary files a/static/images/docs/zh-cn/project-administration/container-limit-ranges/workload-values.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/alpine-image.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/alpine-image.png deleted file mode 100644 index f2bc7a3a1..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/alpine-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/container-log.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/container-log.png deleted file mode 100644 index aebb57afd..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/container-log.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/enable-disk-collection.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/enable-disk-collection.png deleted file mode 100644 index 47e1284ef..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/enable-disk-collection.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/fuzzy-match.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/fuzzy-match.png deleted file mode 100644 index 36fb0a09f..000000000 
Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/fuzzy-match.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/mount-volumes.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/mount-volumes.png deleted file mode 100644 index 8bdb33abe..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/mount-volumes.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/run-command.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/run-command.png deleted file mode 100644 index 2fa25d548..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/run-command.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/disk-log-collection/volume-example.png b/static/images/docs/zh-cn/project-administration/disk-log-collection/volume-example.png deleted file mode 100644 index 951fa03f6..000000000 Binary files a/static/images/docs/zh-cn/project-administration/disk-log-collection/volume-example.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png b/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png deleted file mode 100644 index 6e27cb711..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-basic-information.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-list.png b/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-list.png deleted file mode 100644 index f71a4347a..000000000 Binary files 
a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/multi-cluster-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-basic-information.png b/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-basic-information.png deleted file mode 100644 index 8ebfa1623..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-basic-information.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-list.png b/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-list.png deleted file mode 100644 index e1b422234..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-and-multicluster-project/project-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-gateway/access-method.png b/static/images/docs/zh-cn/project-administration/project-gateway/access-method.png deleted file mode 100644 index e1585d71f..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-gateway/access-method.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-gateway/lb.png b/static/images/docs/zh-cn/project-administration/project-gateway/lb.png deleted file mode 100644 index 06dac4899..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-gateway/lb.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-gateway/nodeport.jpg b/static/images/docs/zh-cn/project-administration/project-gateway/nodeport.jpg deleted file mode 100644 index 440ef4476..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-gateway/nodeport.jpg and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-administration/project-gateway/set-project-gateway.jpg b/static/images/docs/zh-cn/project-administration/project-gateway/set-project-gateway.jpg deleted file mode 100644 index d0f69f0a4..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-gateway/set-project-gateway.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR-added.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR-added.PNG deleted file mode 100644 index ceb54c898..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR-added.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR.PNG deleted file mode 100644 index b76953179..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-CIDR.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule-added.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule-added.PNG deleted file mode 100644 index ed016f755..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule-added.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule.PNG deleted file mode 100644 index 265ca3d72..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/egress-rule.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-CIDR.PNG 
b/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-CIDR.PNG deleted file mode 100644 index 668a1c4ac..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-CIDR.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-cidr-set.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-cidr-set.PNG deleted file mode 100644 index cd3fd401f..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-cidr-set.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule-added.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule-added.PNG deleted file mode 100644 index 586f6d4b8..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule-added.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule.PNG deleted file mode 100644 index 557bd2386..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/ingress-rule.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/isolation-off.PNG b/static/images/docs/zh-cn/project-administration/project-network-isolation/isolation-off.PNG deleted file mode 100644 index 911db0d27..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/isolation-off.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/project-network-isolation/project-network-isolation.PNG 
b/static/images/docs/zh-cn/project-administration/project-network-isolation/project-network-isolation.PNG deleted file mode 100644 index b24bd3cf0..000000000 Binary files a/static/images/docs/zh-cn/project-administration/project-network-isolation/project-network-isolation.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-member-list.png b/static/images/docs/zh-cn/project-administration/role-and-member-management/project-member-list.png deleted file mode 100644 index c35c6da6f..000000000 Binary files a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-member-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-detail.png b/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-detail.png deleted file mode 100644 index d8e2c1c15..000000000 Binary files a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-list.png b/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-list.png deleted file mode 100644 index 186b3b546..000000000 Binary files a/static/images/docs/zh-cn/project-administration/role-and-member-management/project-role-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-messages/alerting-messages.png b/static/images/docs/zh-cn/project-user-guide/alerting/alerting-messages/alerting-messages.png deleted file mode 100644 index b39bacb6d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-messages/alerting-messages.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alert-policy-created.png 
b/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alert-policy-created.png deleted file mode 100644 index 0906a0aba..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alert-policy-created.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png b/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png deleted file mode 100644 index 1d4fc410a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/alerting-policy-detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/rule-template1.png b/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/rule-template1.png deleted file mode 100644 index 42763e534..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/alerting/alerting-policies/rule-template1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/add-container-explain.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/add-container-explain.PNG deleted file mode 100644 index 3a8a23555..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/add-container-explain.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/container-health-check.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/container-health-check.PNG deleted file mode 100644 index f25b10fb2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/container-health-check.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/cube-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/cube-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/envi-var.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/envi-var.PNG deleted file mode 100644 index 806f6191f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/envi-var.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/exec-command-check.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/exec-command-check.PNG deleted file mode 100644 index 206b77c65..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/exec-command-check.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/http-request-check.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/http-request-check.PNG deleted file mode 100644 index d77720fac..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/http-request-check.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/image-pull-policy.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/image-pull-policy.PNG deleted file mode 100644 index 45b598260..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/image-pull-policy.PNG and /dev/null differ diff 
--git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/minus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/plus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/plus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/pod-replicas.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/pod-replicas.PNG deleted file mode 100644 index 61a4edd53..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/pod-replicas.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/resource-request-limit.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/resource-request-limit.PNG deleted file mode 100644 index 5ebf88ad0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/resource-request-limit.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/security-context.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/security-context.PNG deleted file mode 100644 index 742231a4d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/security-context.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/start-command.PNG 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/start-command.PNG deleted file mode 100644 index 0a8831568..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/start-command.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/tcp-port-check.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/tcp-port-check.PNG deleted file mode 100644 index 2aecc4a2f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/container-image-settings/tcp-port-check.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-1.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-1.png deleted file mode 100644 index dbc404dc8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-2.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-2.png deleted file mode 100644 index 4565cd6a8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-icon.png new file mode 100644 index 000000000..3f8ff8891 Binary files /dev/null and b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-icon.png differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-action.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-action.png deleted file mode 100644 index 4773ad6df..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-action.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-create-basic-info.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-create-basic-info.png deleted file mode 100644 index a5feb0d81..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-create-basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list-new.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list-new.png deleted file mode 100644 index 00f1a5005..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list-new.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list.png deleted file mode 100644 index 9e3abf3c8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/down-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/execution-record.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/execution-record.png 
deleted file mode 100644 index a7700af14..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/execution-record.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/finish-image.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/finish-image.png deleted file mode 100644 index 5121ef211..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/finish-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/input-busybox.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/input-busybox.png deleted file mode 100644 index 8cfcac8a4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/input-busybox.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-detail-page.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-detail-page.png deleted file mode 100644 index 66f9a1858..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-detail-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-list.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-list.png deleted file mode 100644 index 578a7a2c0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/start-command.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/start-command.png deleted file mode 100644 index 31ccc9b62..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/start-command.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonset-request-limit.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonset-request-limit.png deleted file mode 100644 index adbda110c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonset-request-limit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets.png deleted file mode 100644 index 413eb0e6b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_autorefresh_start.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_autorefresh_start.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_autorefresh_stop.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_autorefresh_stop.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail.png deleted file mode 100644 index 81b3d1866..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_operation_btn.png 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_operation_btn.png deleted file mode 100644 index b165768fa..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_operation_btn.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_pod.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_pod.png deleted file mode 100644 index ea1db92d4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_pod.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_state.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_state.png deleted file mode 100644 index 0cce1771d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_detail_state.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_env_variable.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_env_variable.png deleted file mode 100644 index 5e79a9aac..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_env_variable.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_events.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_events.png deleted file mode 100644 index 65cb59a09..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_events.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_1.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_1.png deleted file mode 100644 index 27e7b0c50..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_1.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_1.png deleted file mode 100644 index ac6834d58..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_btn.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_btn.png deleted file mode 100644 index 63b3f11f7..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_2_container_btn.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_3.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_3.png deleted file mode 100644 index ebda973f9..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_4.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_4.png deleted file mode 100644 index abd97ffd0..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_form_4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_list.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_list.png deleted file mode 100644 index a0233051d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_metadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_metadata.png deleted file mode 100644 index 521458e40..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_metadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_monitoring.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_monitoring.png deleted file mode 100644 index cbee23a3a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_refresh.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_time_range.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_time_range.png deleted file mode 100644 index a01087a3c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/daemonsets_time_range.png and /dev/null differ diff 
--git a/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/three-dots.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/daemonsets/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/add-container-image.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/add-container-image.PNG deleted file mode 100644 index bb0039755..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/add-container-image.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/advanced-settings.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/advanced-settings.PNG deleted file mode 100644 index ab3951f72..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/advanced-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-detail-page.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-detail-page.PNG deleted file mode 100644 index 50f8587b8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-detail-page.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-listed.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-listed.PNG deleted file mode 100644 index 14012fa16..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-listed.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-name.PNG 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-name.PNG deleted file mode 100644 index b504d9626..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployment-name.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-env-variables.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-env-variables.png deleted file mode 100644 index 1d1c847f8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-env-variables.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-events.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-events.png deleted file mode 100644 index a0bd9098c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-events.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-matadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-matadata.png deleted file mode 100644 index 03e33100d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-matadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-monitoring.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-monitoring.png deleted file mode 100644 index b16703385..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-monitoring.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-time-range.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-time-range.png deleted file mode 100644 index c41e86695..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments-time-range.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments.PNG deleted file mode 100644 index 190dcea06..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_autorefresh_start.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_autorefresh_start.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_autorefresh_stop.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_autorefresh_stop.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_refresh.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/deployments_refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/down-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/input-image-name.PNG 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/input-image-name.PNG deleted file mode 100644 index b16fdff05..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/input-image-name.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/minus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/more-actions.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/more-actions.PNG deleted file mode 100644 index 0b8697b5c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/more-actions.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/mount-volumes.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/mount-volumes.PNG deleted file mode 100644 index 5d576ce6e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/mount-volumes.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/plus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/plus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/pod-details.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/pod-details.PNG deleted file mode 100644 index c5c3cee00..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/pod-details.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/replica-number.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/replica-number.PNG deleted file mode 100644 index ffb1a24f5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/replica-number.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-setting.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-setting.PNG deleted file mode 100644 index fbcc5bb4e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-setting.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-status.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-status.PNG deleted file mode 100644 index e3a0d5bce..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/resource-status.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/three-dots.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/up-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/deployments/up-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png deleted file mode 100644 index 6bc900a68..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/add-container-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png deleted file mode 100644 index 18c0e8721..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/busybox.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png deleted file mode 100644 index 34178a7db..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cancel-hpa.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png deleted file mode 100644 index eb5b84396..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/cpu-request.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png deleted file mode 100644 index b02e165a4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png deleted file mode 100644 index dee74b343..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/create-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png deleted file mode 100644 index 738d23662..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/deployment-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png deleted file mode 100644 index f27290dc6..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/horizontal-pod-autoscaling.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png deleted file mode 100644 index 2bd35c930..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png deleted file mode 100644 index 4e1c645b4..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/hpa-parameters.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png deleted file mode 100644 index fd3d1817a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-decrease.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png deleted file mode 100644 index 91c487492..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/pods-increase.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png deleted file mode 100644 index 2be4ddc54..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/service-name.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png deleted file mode 100644 index c11881f26..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/start-command.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png deleted file mode 100644 index 7674cf63c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/stateless-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png deleted file mode 100644 index 58ebe0a22..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/target-cpu-utilization.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/three-dots.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/horizontal-pod-autoscaling/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/add-container-image-job.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/add-container-image-job.PNG deleted file mode 100644 index 5dc1a1df2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/add-container-image-job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-check.jpg b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-check.jpg deleted file mode 100644 index d6cd50c51..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-check.jpg and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-icon.png new file mode 100644 index 000000000..3f8ff8891 Binary files /dev/null and b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log-icon.png differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log.PNG deleted file mode 100644 index e9fa55c93..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/container-log.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/create-job.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/create-job.PNG deleted file mode 100644 index d397556d0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/create-job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/display.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/display.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/down-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/env-variable.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/env-variable.png deleted file mode 100644 index cf24df42b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/env-variable.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/events.png 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/events.png deleted file mode 100644 index e1c2550af..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/events.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-record.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-record.PNG deleted file mode 100644 index 9b5a899c6..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-record.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-records.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-records.png deleted file mode 100644 index 87123fb22..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/execution-records.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/hide.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/hide.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-container-settings.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-container-settings.PNG deleted file mode 100644 index 389aae5b9..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-container-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-basic-info.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-basic-info.PNG deleted file mode 100644 index 700467834..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-basic-info.PNG and 
/dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-job-settings.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-job-settings.PNG deleted file mode 100644 index 9869fab96..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-create-job-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-list-new.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-list-new.PNG deleted file mode 100644 index 3d935b6a1..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-list-new.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-operation.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-operation.PNG deleted file mode 100644 index e9649ba0e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/job-operation.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/metadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/metadata.png deleted file mode 100644 index a45109815..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/metadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/refresh.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/resource-status.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/resource-status.png deleted file mode 100644 index d4d6cc661..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/resource-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/start-command-job.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/start-command-job.PNG deleted file mode 100644 index 7ac4ec5ab..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/jobs/start-command-job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-loadbalancer.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-loadbalancer.png deleted file mode 100644 index f2ec37152..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-nodeport.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-nodeport.png deleted file mode 100644 index 8bd390972..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-method-nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-loadbalancer.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-loadbalancer.png deleted file mode 100644 index 362ca43b3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-nodeport.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-nodeport.png deleted file mode 100644 index 5dded04b1..000000000 Binary 
files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/access-route-nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/add-metadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/add-metadata.png deleted file mode 100644 index cd7c8fc87..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/add-metadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/auto-generate.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/auto-generate.png deleted file mode 100644 index c21c2c91c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/auto-generate.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/basic-info.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/basic-info.png deleted file mode 100644 index 0da768fe2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/create-route.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/create-route.png deleted file mode 100644 index 80ee8bcf0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/create-route.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/edit-route.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/edit-route.png deleted file mode 100644 index f8376a2fc..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/edit-route.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/events.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/events.png deleted file mode 100644 index b1269828e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/events.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/metadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/metadata.png deleted file mode 100644 index d34d9fda4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/metadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png deleted file mode 100644 index a2f3a83ff..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-loadbalancer.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-nodeport.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-nodeport.png deleted file mode 100644 index b08fa17ed..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/obtain-address-nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/resource-status.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/resource-status.png deleted file mode 100644 index 2f3c72a30..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/resource-status.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/route-list.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/route-list.png deleted file mode 100644 index 3fb54d385..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/route-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/set-gateway.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/set-gateway.png deleted file mode 100644 index 221805348..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/set-gateway.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/specify-domain.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/specify-domain.png deleted file mode 100644 index 3bd1dea66..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/routes/specify-domain.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/create-service-type.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/create-service-type.png deleted file mode 100644 index 55d949ede..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/create-service-type.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-events.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-events.png deleted file mode 100644 index b942d148a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-events.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-lists.PNG 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-lists.PNG deleted file mode 100644 index 333993df7..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-lists.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-matadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-matadata.png deleted file mode 100644 index dc0ee6bf3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-matadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-pods.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-pods.png deleted file mode 100644 index 2afde76b7..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-pods.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-resource-status.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-resource-status.png deleted file mode 100644 index 57a8e0995..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services-resource-status.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_display_containers.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_display_containers.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_hide_containers.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_hide_containers.png old mode 100644 new mode 100755 diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_refresh_pods.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/services_refresh_pods.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-detail.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-detail.PNG deleted file mode 100644 index 693a589de..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-detail.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-finish.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-finish.PNG deleted file mode 100644 index af18639f5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-finish.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-1.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-1.PNG deleted file mode 100644 index d00cae48d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-2.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-2.PNG deleted file mode 100644 index d8e34ed4c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-3.PNG 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-3.PNG deleted file mode 100644 index dfd29f84b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-4.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-4.PNG deleted file mode 100644 index 442708b43..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form.PNG b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form.PNG deleted file mode 100644 index e707b12db..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/stateless-form.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/services/three-dots.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/services/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/down-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/down-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/minus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/minus-icon.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/plus-icon.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/plus-icon.png old mode 100644 new mode 100755 diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulset-request-limit.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulset-request-limit.png deleted file mode 100644 index 2707038c4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulset-request-limit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-events.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-events.png deleted file mode 100644 index fe6b03977..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-events.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-evn-variables.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-evn-variables.png deleted file mode 100644 index eb5a9d547..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-evn-variables.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-matadata.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-matadata.png deleted file mode 100644 index 010f43df0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-matadata.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-monitoring.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-monitoring.png deleted file mode 100644 index ad9955751..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-monitoring.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-time-range.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-time-range.png deleted file mode 100644 index 447e76682..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets-time-range.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets.png deleted file mode 100644 index b635efa1c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_autorefresh_start.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_autorefresh_start.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_autorefresh_stop.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_autorefresh_stop.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail.png deleted file mode 100644 index 4b80da182..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_operation_btn.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_operation_btn.png deleted file mode 100644 index cdee3da8f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_operation_btn.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_pod.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_pod.png deleted file mode 100644 index 41765eb4c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_pod.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_state.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_state.png deleted file mode 100644 index cd3798374..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_detail_state.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_1.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_1.png deleted file mode 100644 index 1df6b330c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2.png deleted file mode 100644 index 7fa0d5d35..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_1.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_1.png deleted file mode 100644 index 279dcb27b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_btn.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_btn.png deleted file mode 100644 index 381f3063b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_2_container_btn.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_3.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_3.png deleted file mode 100644 index ba717495e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_4.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_4.png deleted file mode 100644 index e702f414b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_form_4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_list.png 
b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_list.png deleted file mode 100644 index aa495bad0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_refresh.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/statefulsets_refresh.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/three-dots.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/three-dots.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/up-arrow.png b/static/images/docs/zh-cn/project-user-guide/application-workloads/statefulsets/up-arrow.png old mode 100644 new mode 100755 diff --git a/static/images/docs/zh-cn/project-user-guide/applications/app-templates/app-store-1.png b/static/images/docs/zh-cn/project-user-guide/applications/app-templates/app-store-1.png deleted file mode 100644 index c8c0dd151..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/app-templates/app-store-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/app-templates/private-app-repository-2.png b/static/images/docs/zh-cn/project-user-guide/applications/app-templates/private-app-repository-2.png deleted file mode 100644 index 69c09e5e9..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/app-templates/private-app-repository-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png 
b/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png deleted file mode 100644 index 967327942..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/dashboard.png b/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/dashboard.png deleted file mode 100644 index adb547a1d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/review-page.png b/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/review-page.png deleted file mode 100644 index d55e1ca02..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/review-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/route.png b/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/route.png deleted file mode 100644 index 739eaf59e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/create-a-microservices-based-app/route.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png deleted file mode 100644 index 227705182..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/access-nginx.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png deleted file mode 100644 index faf304d35..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/confirm-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png deleted file mode 100644 index 11d06f812..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/deploy-nginx.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png deleted file mode 100644 index 45d41442e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-config-nginx.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png deleted file mode 100644 index 2368e8591..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png deleted file mode 100644 index 55e69d8a9..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/exposed-port.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png deleted file mode 100644 index 688bbef02..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/manifest-file.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png deleted file mode 100644 index c912a3fab..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-in-app-store.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png deleted file mode 100644 index 8526e8954..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png deleted file mode 100644 index 6d6cd8c7d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nginx-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png deleted file 
mode 100644 index 738b474d0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-store/nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png deleted file mode 100644 index 7088ba15d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/add-app-repo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png deleted file mode 100644 index bbe785163..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/app-config.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png deleted file mode 100644 index e9de77c74..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/click-eye-icon.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png deleted file mode 100644 index a56442fa1..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/confirm-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png 
b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png deleted file mode 100644 index a38f44134..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/create-new-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png deleted file mode 100644 index 3f41fb7d2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/deploy-grafana.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png deleted file mode 100644 index d7944bb56..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/edit-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png deleted file mode 100644 index b3733a669..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/exposed-port.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png deleted file mode 100644 index 6b94ed2d3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-UI.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png deleted file mode 100644 index 01bef1850..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png deleted file mode 100644 index e4b0d8a44..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/grafana-services.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png deleted file mode 100644 index 08c9d4f8d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/home-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png deleted file mode 100644 index 1571ca184..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/input-repo-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png deleted file mode 100644 index 75e022550..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/nodeport.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png deleted file mode 100644 index e928f9797..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/private-app-template.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png deleted file mode 100644 index 07987d462..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/repository-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png deleted file mode 100644 index 41a348431..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/search-grafana.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png deleted file mode 100644 index cd4b58373..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/secret-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png 
b/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png deleted file mode 100644 index d258eb7a2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/applications/deploy-apps-from-app-templates/select-app-templates.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/detail-page.png b/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/detail-page.png deleted file mode 100644 index 3a0d82b00..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/detail-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/key-value.jpg b/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/key-value.jpg deleted file mode 100644 index 9b30c94cf..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/key-value.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/use-configmap.jpg b/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/use-configmap.jpg deleted file mode 100644 index d7bc158e2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/configmaps/use-configmap.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/create-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/create-secret.png deleted file mode 100644 index 8a9e538d1..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/create-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/harbor-address.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/harbor-address.png deleted 
file mode 100644 index a1e1b8c8f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/harbor-address.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/image-registry-info.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/image-registry-info.png deleted file mode 100644 index ed41c669f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/image-registry-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/open-dashboard.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/open-dashboard.png deleted file mode 100644 index 913db775f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/open-dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/qingcloud-registry.jpg b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/qingcloud-registry.jpg deleted file mode 100644 index 419b21461..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/qingcloud-registry.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/use-image-registry.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/use-image-registry.png deleted file mode 100644 index 414e3ca25..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/use-image-registry.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/validate-registry-address.png b/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/validate-registry-address.png 
deleted file mode 100644 index dd8a1b3e0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/image-registries/validate-registry-address.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/account-password-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/account-password-secret.png deleted file mode 100644 index 79d5e6323..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/account-password-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/create-secrets.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/create-secrets.png deleted file mode 100644 index 5e30ed6de..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/create-secrets.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/default-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/default-secret.png deleted file mode 100644 index 4bd35313b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/default-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/docker-hub-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/docker-hub-secret.png deleted file mode 100644 index 458273bbb..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/docker-hub-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/enter-key.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/enter-key.png deleted file mode 100644 index 79c3e088e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/enter-key.png 
and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/github-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/github-secret.png deleted file mode 100644 index 53970dc17..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/github-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/image-registry-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/image-registry-secret.png deleted file mode 100644 index ab7febce4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/image-registry-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-detail-page.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-detail-page.png deleted file mode 100644 index 719f70091..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-detail-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-dropdown-menu.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-dropdown-menu.png deleted file mode 100644 index c9c20b16b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-dropdown-menu.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-list.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-list.png deleted file mode 100644 index 68aeb5e00..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-type.png 
b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-type.png deleted file mode 100644 index 345895067..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/secret-type.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/set-secret.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/set-secret.png deleted file mode 100644 index 1b37d8bb8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/set-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/tls.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/tls.png deleted file mode 100644 index 865dd05bf..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/tls.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-image.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-image.png deleted file mode 100644 index 259f4ec40..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-repository.png b/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-repository.png deleted file mode 100644 index cbd3db35b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/configurations/secrets/use-secret-repository.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-configurations.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-configurations.png deleted file mode 100644 index 
7b85bf0a8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-configurations.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png deleted file mode 100644 index af2d0151d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-exporter-ready.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-monitoring-dashboard.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-monitoring-dashboard.png deleted file mode 100644 index 83029e30e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-monitoring-dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png deleted file mode 100644 index 8f2995417..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-ready.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png deleted file mode 100644 index 7c1019d2f..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-root-password.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/app-template-create.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/app-template-create.PNG deleted file mode 100644 index 8d7a3c8ca..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/app-template-create.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-create-app-template.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-create-app-template.PNG deleted file mode 100644 index 239abc49b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-create-app-template.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-sample-web-1.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-sample-web-1.PNG deleted file mode 100644 index d8a00d97c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-sample-web-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-2.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-2.PNG deleted file mode 100644 index 2e89f0c72..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-4.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-4.PNG deleted file mode 100644 index cc01f32f0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-5.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-5.PNG deleted file mode 100644 index 3ff8844e4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-6.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-6.PNG deleted file mode 100644 index ddaf764d3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template.PNG 
deleted file mode 100644 index febdf5efb..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-1.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-1.PNG deleted file mode 100644 index 33a698327..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-10.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-10.PNG deleted file mode 100644 index 1fda03d3c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-10.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-2.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-2.PNG deleted file mode 100644 index 5e2426732..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-3.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-3.PNG deleted file mode 100644 
index bf2a28a97..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-4.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-4.PNG deleted file mode 100644 index 3abc2f7b9..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-5.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-5.PNG deleted file mode 100644 index 5569603d4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-6.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-6.PNG deleted file mode 100644 index 328aaaf2b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-7.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-7.PNG deleted file mode 100644 index d692741ab..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-8.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-8.PNG deleted file mode 100644 index 883c378b7..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-8.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-9.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-9.PNG deleted file mode 100644 index 8e88e70dc..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-9.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-1.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-1.PNG deleted file mode 100644 index ddaf764d3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-1.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-2.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-2.PNG deleted file mode 100644 index 46181a98d..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-3.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-3.PNG deleted file mode 100644 index 966ddef3c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-3.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-4.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-4.PNG deleted file mode 100644 index f3af0593b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-5.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-5.PNG deleted file mode 100644 index b3b654565..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-5.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-6.PNG b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-6.PNG deleted file mode 100644 index 0a0af59a4..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-6.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/plus-icon.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/plus-icon.png new file mode 100644 index 000000000..43f67656e Binary files /dev/null and b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/plus-icon.png differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png deleted file mode 100644 index f689f955b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/graph-chart.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png deleted file mode 100644 index b54db163a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/charts/text-chart.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-items.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-items.png deleted file mode 100644 index 1b11920d6..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-items.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-text-chart.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-text-chart.png deleted file mode 100644 index c52607833..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/add-text-chart.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/chart-detail.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/chart-detail.png deleted file mode 100644 index 9c654e69e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/chart-detail.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png deleted file mode 100644 index d659434f5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/create-dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-dashboard.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-dashboard.png deleted file mode 100644 index 87448933d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-dashboard.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-yaml.png 
b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-yaml.png deleted file mode 100644 index 469d7bad2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/edit-yaml.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/middle-column.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/middle-column.png deleted file mode 100644 index 04ac23540..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/middle-column.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png b/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png deleted file mode 100644 index 9e0420e17..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/custom-application-monitoring/visualization/overview/plus-btn.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.PNG deleted file mode 100644 index 79d08fd83..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.PNG deleted file mode 100644 index f98034d91..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.PNG and /dev/null 
differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.PNG deleted file mode 100644 index 565256eb5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.PNG deleted file mode 100644 index ca75abc37..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.PNG deleted file mode 100644 index 7e5aa3ea2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-4.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-4.PNG deleted file mode 100644 index 4c1083f17..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-4.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-5.gif b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-5.gif deleted file mode 100644 index 76fef76fc..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-5.gif and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-job.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-job.PNG deleted file mode 100644 index f11f5d38e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release-job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release_6.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release_6.png deleted file mode 100644 index 6a275ed32..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary-release_6.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary.gif b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary.gif deleted file mode 100644 index cbc2b786d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/canary.gif and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/deployment-list_1.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/deployment-list_1.png deleted file mode 100644 index 9782e7d04..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/deployment-list_1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/finish-canary-release.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/finish-canary-release.png deleted file mode 100644 index 5026d00ce..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/finish-canary-release.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/job-offline.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/job-offline.PNG deleted file mode 100644 index 561190b8a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/job-offline.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-release.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-release.png deleted file mode 100644 index b8b4460d7..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-release.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-traffic.PNG b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-traffic.PNG deleted file mode 100644 index 65390b9b4..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/take-over-traffic.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/topology.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/topology.png deleted file mode 100644 index be678201b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/topology.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png deleted file mode 100644 index 3334c4c5e..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing-kubesphere.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing.png deleted file mode 100644 index a1899597c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/tracing.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/traffic-management.png b/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/traffic-management.png deleted file mode 100644 index 185cbe2f5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/canary-release/traffic-management.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg b/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg deleted file mode 100644 index d5a7cb7b5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg b/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg deleted file mode 100644 index bbb3e20d2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg b/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg deleted file mode 100644 index 
b16d1b1ef..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg b/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg deleted file mode 100644 index a7864b60e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-task.jpg b/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-task.jpg deleted file mode 100644 index 17d7743a0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-task.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/access-service.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/access-service.PNG deleted file mode 100644 index b024958cc..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/access-service.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/advanced-settings.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/advanced-settings.PNG deleted file mode 100644 index 52cbf136a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/advanced-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-settings.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-settings.PNG deleted file mode 100644 
index 15b25da09..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-settings-2.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-settings-2.PNG deleted file mode 100644 index e77cf7605..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-settings-2.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-status.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-status.PNG deleted file mode 100644 index 7ea415d82..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-status.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building.PNG deleted file mode 100644 index 849f62d07..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/container-settings.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/container-settings.PNG deleted file mode 100644 index cfc3e07f5..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/container-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/create-service.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/create-service.PNG deleted file mode 100644 index 647d9ab6b..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/create-service.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/deployment.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/deployment.PNG deleted file mode 100644 index bfc349dc6..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/deployment.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image-pushed.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image-pushed.PNG deleted file mode 100644 index adb3ee6af..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image-pushed.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image.PNG deleted file mode 100644 index 554f12409..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/exposed-port.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/exposed-port.PNG deleted file mode 100644 index 42c92f572..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/exposed-port.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-builder.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-builder.PNG deleted file mode 100644 index aff43cac0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-builder.PNG and 
/dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-success.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-success.PNG deleted file mode 100644 index df6544d7c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-success.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-log.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-log.PNG deleted file mode 100644 index 7ecf4fc08..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-log.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-logs.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-logs.PNG deleted file mode 100644 index a0d71ffb0..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-logs.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job-created.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job-created.PNG deleted file mode 100644 index d6756d1b1..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job-created.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job.PNG deleted file mode 100644 index 5c1b83ec6..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service.PNG 
b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service.PNG deleted file mode 100644 index ef9ed43d2..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/successful.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/successful.PNG deleted file mode 100644 index 74158664d..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/successful.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/upload-artifact.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/upload-artifact.PNG deleted file mode 100644 index 829a0043a..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/upload-artifact.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-log.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-log.PNG deleted file mode 100644 index 7bc0b04f8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-log.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build_settings.png b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build_settings.png deleted file mode 100644 index 4aafa37bb..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build_settings.png and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/building.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/building.PNG deleted file mode 100644 index 
48f66f0d8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/building.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/copy-repo-code.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/copy-repo-code.PNG deleted file mode 100644 index 04afdc83b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/copy-repo-code.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-finish.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-finish.PNG deleted file mode 100644 index d82a2fd73..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-finish.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-service.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-service.PNG deleted file mode 100644 index 961e5e108..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-service.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/deployment.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/deployment.PNG deleted file mode 100644 index ec36e327c..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/deployment.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/docker-image.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/docker-image.PNG deleted file mode 100644 index d21ec6a13..000000000 Binary files 
a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/docker-image.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/fork-repository.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/fork-repository.PNG deleted file mode 100644 index 5dc46c69f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/fork-repository.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/health-checker.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/health-checker.PNG deleted file mode 100644 index 03fa6edc3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/health-checker.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/job.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/job.PNG deleted file mode 100644 index 3cdc3eabb..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/job.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/select-lang-type.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/select-lang-type.PNG deleted file mode 100644 index af2393d29..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/select-lang-type.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-detail.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-detail.PNG deleted file mode 100644 index 2f3f9377b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-detail.PNG and /dev/null differ 
diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-settings.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-settings.PNG deleted file mode 100644 index a5fcb5696..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-settings.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service.PNG deleted file mode 100644 index 8a4e341e1..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/successful-result.PNG b/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/successful-result.PNG deleted file mode 100644 index d117e2e46..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/successful-result.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg deleted file mode 100644 index f3d0dc162..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volume-snapshots/apply-volume.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/delete-volume.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/delete-volume.jpg deleted file mode 100644 index 508a7cd8e..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/delete-volume.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/local-pending.jpg 
b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/local-pending.jpg deleted file mode 100644 index dd9d5802b..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/local-pending.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/select-storage-class.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/select-storage-class.jpg deleted file mode 100644 index ccc15f1d8..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/select-storage-class.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-detail-page.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-detail-page.jpg deleted file mode 100644 index a6893365f..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-detail-page.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-monitoring.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-monitoring.jpg deleted file mode 100644 index d71ee8340..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-monitoring.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-page.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-page.jpg deleted file mode 100644 index 5c5aeb566..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-page.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-status.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-status.jpg deleted file mode 100644 index 
471a05bfc..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volume-status.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volumebindingmode.jpg b/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volumebindingmode.jpg deleted file mode 100644 index ffe2335f3..000000000 Binary files a/static/images/docs/zh-cn/project-user-guide/volume-management/volumes/volumebindingmode.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/all-in-one-on-linux/service-components.png b/static/images/docs/zh-cn/quickstart/all-in-one-on-linux/service-components.png deleted file mode 100644 index aa3d8322f..000000000 Binary files a/static/images/docs/zh-cn/quickstart/all-in-one-on-linux/service-components.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/完成网关设置.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/完成网关设置.png deleted file mode 100644 index 627c94527..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/完成网关设置.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/帐户列表.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/帐户列表.png deleted file mode 100644 index 185e80e12..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/帐户列表.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/添加用户.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/添加用户.png deleted file mode 100644 index 4fc20061c..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/添加用户.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请devops成员.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请devops成员.png deleted file mode 100644 index 2f41fec20..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请devops成员.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请列表.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请列表.png deleted file mode 100644 index 729fbb53a..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请列表.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请成员至项目.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请成员至项目.png deleted file mode 100644 index e72c701c2..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/邀请成员至项目.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/项目配额.png b/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/项目配额.png deleted file mode 100644 index d1df52bd1..000000000 Binary files a/static/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/项目配额.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-bookinfo1.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-bookinfo1.png deleted file mode 100644 index 418861da7..000000000 Binary files a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-bookinfo1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-to-visit.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-to-visit.png deleted file mode 100644 index 1f9febae6..000000000 Binary files 
a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/click-to-visit.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png deleted file mode 100644 index cccc77941..000000000 Binary files a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/create-bookinfo.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/details-page.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/details-page.png deleted file mode 100644 index 00bbf6c56..000000000 Binary files a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/details-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running.png deleted file mode 100644 index 1a7b21be1..000000000 Binary files a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running1.png b/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running1.png deleted file mode 100644 index c477bc610..000000000 Binary files a/static/images/docs/zh-cn/quickstart/deploy-bookinfo-to-k8s/running1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/CRD.png b/static/images/docs/zh-cn/quickstart/enable-pluggable-components/CRD.png deleted file mode 100644 index 2ee216f8d..000000000 Binary files a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/CRD.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/服务组件.jpg b/static/images/docs/zh-cn/quickstart/enable-pluggable-components/服务组件.jpg deleted file mode 100644 index b976bc7cb..000000000 Binary files 
a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/服务组件.jpg and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/编辑配置文件.png b/static/images/docs/zh-cn/quickstart/enable-pluggable-components/编辑配置文件.png deleted file mode 100644 index 9e94a99bf..000000000 Binary files a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/编辑配置文件.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/集群管理.png b/static/images/docs/zh-cn/quickstart/enable-pluggable-components/集群管理.png deleted file mode 100644 index 0b60261b8..000000000 Binary files a/static/images/docs/zh-cn/quickstart/enable-pluggable-components/集群管理.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/minimal-kubesphere-on-k8s/kubesphere-components.png b/static/images/docs/zh-cn/quickstart/minimal-kubesphere-on-k8s/kubesphere-components.png deleted file mode 100644 index b1681c0c8..000000000 Binary files a/static/images/docs/zh-cn/quickstart/minimal-kubesphere-on-k8s/kubesphere-components.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/access-method.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/access-method.png deleted file mode 100644 index 3c853a479..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/access-method.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-container.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-container.png deleted file mode 100644 index 6881720a4..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-container.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-service.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-service.png deleted file mode 100644 index 9e1b173fd..000000000 Binary files 
a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-service.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-volume-page.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-volume-page.png deleted file mode 100644 index 22faa7456..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/add-volume-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting.png deleted file mode 100644 index 40c896d4d..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting1.png deleted file mode 100644 index f590d133c..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced-setting1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced.png deleted file mode 100644 index edb0d9203..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced1.png deleted file mode 100644 index 981615705..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/advanced1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created.png deleted file mode 100644 index a5aca541e..000000000 Binary files 
a/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created1.png deleted file mode 100644 index dea2a8e00..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/application-created1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/basic-info.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/basic-info.png deleted file mode 100644 index 7bdd64b73..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/basic-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/choose-existing-volume.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/choose-existing-volume.png deleted file mode 100644 index 798b9c333..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/choose-existing-volume.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/components-finished.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/components-finished.png deleted file mode 100644 index 9838d404f..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/components-finished.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app.png deleted file mode 100644 index 6ad5d9da2..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app1.png deleted file mode 100644 index 
0bec51521..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/composing-app1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image-page.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image-page.png deleted file mode 100644 index 748b2905c..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image.png deleted file mode 100644 index fb248ff70..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/container-image.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret.png deleted file mode 100644 index 0cdbc2946..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret1.png deleted file mode 100644 index e535ab8c8..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/create-secret1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access.png deleted file mode 100644 index a31db95ca..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access1.png 
b/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access1.png deleted file mode 100644 index 50198f2f5..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/edit-internet-access1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-var.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-var.png deleted file mode 100644 index 1421154dc..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-var.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-varss.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-varss.png deleted file mode 100644 index ba0c8b976..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/environment-varss.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/ingress-create.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/ingress-create.png deleted file mode 100644 index 2579dfbce..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/ingress-create.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/key-value.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/key-value.png deleted file mode 100644 index 4e9b9b7e1..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/key-value.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mount-volume-page.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/mount-volume-page.png deleted file mode 100644 index fa38998a0..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mount-volume-page.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-finished.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-finished.png deleted file mode 100644 index 8f6670b7f..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-finished.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running.png deleted file mode 100644 index b3860f460..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running1.png deleted file mode 100644 index 6d519370f..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysql-running1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysqlname.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysqlname.png deleted file mode 100644 index a9c39444d..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/mysqlname.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/name-wordpress.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/name-wordpress.png deleted file mode 100644 index 81dd99992..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/name-wordpress.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number.png deleted file mode 100644 index 703ee6753..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number1.png deleted file mode 100644 index 5a0f33e2a..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/nodeport-number1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings.png deleted file mode 100644 index 3311b4a9f..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings1.png deleted file mode 100644 index bc42e7559..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-settings1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template.png deleted file mode 100644 index db247af8e..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template1.png deleted file mode 100644 index b94777116..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volume-template1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes.png deleted file mode 100644 index b499fb661..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes1.png deleted file mode 100644 index 63d73d67e..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/volumes1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment.png deleted file mode 100644 index 9e10a494b..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment1.png deleted file mode 100644 index e6c354edd..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-deployment1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets.png deleted file mode 100644 index 61dde6cc2..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets.png and /dev/null differ diff --git a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets1.png b/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets1.png deleted file mode 100644 index 3cfc8a4a3..000000000 Binary files a/static/images/docs/zh-cn/quickstart/wordpress-deployment/wordpress-secrets1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-log-filter.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-log-filter.PNG deleted file mode 100644 index 035167608..000000000 Binary files 
a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-log-filter.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating-ui.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating-ui.PNG deleted file mode 100644 index c1a547fe5..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating-ui.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating.PNG deleted file mode 100644 index b2bdb33ad..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/auditing-operating.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/user-changed.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/user-changed.PNG deleted file mode 100644 index c98bcc356..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-log-query/user-changed.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/审计日志详情.png b/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/审计日志详情.png deleted file mode 100644 index 7b6e49c59..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/审计日志详情.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/操作审计.png b/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/操作审计.png deleted file mode 100644 index 12a80d037..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/操作审计.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/过滤审计日志.png b/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/过滤审计日志.png deleted file mode 100644 index f1607c2f1..000000000 Binary files 
a/static/images/docs/zh-cn/toolbox/auditing/auditing-logs/过滤审计日志.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/alerting-archiving-rule.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/alerting-archiving-rule.PNG deleted file mode 100644 index ab18343f5..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/alerting-archiving-rule.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/auditing-crd.PNG b/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/auditing-crd.PNG deleted file mode 100644 index 29c83d97a..000000000 Binary files a/static/images/docs/zh-cn/toolbox/auditing/auditing-rules/auditing-crd.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/event-query/event-details.png b/static/images/docs/zh-cn/toolbox/event-query/event-details.png deleted file mode 100644 index 84c8cb90d..000000000 Binary files a/static/images/docs/zh-cn/toolbox/event-query/event-details.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/event-query/event-search-list.png b/static/images/docs/zh-cn/toolbox/event-query/event-search-list.png deleted file mode 100644 index 5cd9e4de3..000000000 Binary files a/static/images/docs/zh-cn/toolbox/event-query/event-search-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/event-query/event-search.png b/static/images/docs/zh-cn/toolbox/event-query/event-search.png deleted file mode 100644 index 1de1b5787..000000000 Binary files a/static/images/docs/zh-cn/toolbox/event-query/event-search.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/index/toolbox.PNG b/static/images/docs/zh-cn/toolbox/index/toolbox.PNG deleted file mode 100644 index 5f85bde03..000000000 Binary files a/static/images/docs/zh-cn/toolbox/index/toolbox.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/container-detail-page.png 
b/static/images/docs/zh-cn/toolbox/log-query/container-detail-page.png deleted file mode 100644 index bbc5a4d68..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/container-detail-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/log-search-conditions.png b/static/images/docs/zh-cn/toolbox/log-query/log-search-conditions.png deleted file mode 100644 index 1f172f60b..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/log-search-conditions.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/log-search-details-page.png b/static/images/docs/zh-cn/toolbox/log-query/log-search-details-page.png deleted file mode 100644 index 6a2364407..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/log-search-details-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/log-search-list.png b/static/images/docs/zh-cn/toolbox/log-query/log-search-list.png deleted file mode 100644 index 00e32b222..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/log-search-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/log-search.png b/static/images/docs/zh-cn/toolbox/log-query/log-search.png deleted file mode 100644 index 2bad59ae8..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/log-search.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/log-query/pod-details-page.png b/static/images/docs/zh-cn/toolbox/log-query/pod-details-page.png deleted file mode 100644 index 23327e994..000000000 Binary files a/static/images/docs/zh-cn/toolbox/log-query/pod-details-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/enable-billing/计量计费信息.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/enable-billing/计量计费信息.png deleted file mode 100644 index ecfcdf91e..000000000 Binary files 
a/static/images/docs/zh-cn/toolbox/metering-and-billing/enable-billing/计量计费信息.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png deleted file mode 100644 index 2d64e465b..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/cluster-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/deployment-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/deployment-page.png deleted file mode 100644 index c025aecb3..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/deployment-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/node-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/node-page.png deleted file mode 100644 index f2a00c485..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/node-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/pod-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/pod-page.png deleted file mode 100644 index 4535d5236..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/pod-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/project-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/project-page.png deleted file mode 100644 index 57793afc5..000000000 Binary files 
a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/project-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workload-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workload-page.png deleted file mode 100644 index 41e515564..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workload-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png b/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png deleted file mode 100644 index 3bbbc2a38..000000000 Binary files a/static/images/docs/zh-cn/toolbox/metering-and-billing/view-resource-consumption/workspace-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-enter.PNG b/static/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-enter.PNG deleted file mode 100644 index af632169b..000000000 Binary files a/static/images/docs/zh-cn/toolbox/web-kubectl/web-kubectl-enter.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-info-dialogue-2.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-info-dialogue-2.png deleted file mode 100644 index d666fa300..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-info-dialogue-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-1.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-1.png deleted file mode 100644 index 
09f9f110f..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-list-4.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-list-4.png deleted file mode 100644 index 9220e2cb1..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/app-repo-list-4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/validate-link-3.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/validate-link-3.png deleted file mode 100644 index c1d08d571..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/app-repository/import-helm-repository/validate-link-3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/project-quotas.PNG b/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/project-quotas.PNG deleted file mode 100644 index f06b9df19..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/project-quotas.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-other-resource-quotas.PNG b/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-other-resource-quotas.PNG deleted file mode 100644 index 1ce6cbb30..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-other-resource-quotas.PNG and /dev/null differ diff --git 
a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-project-quotas.PNG b/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-project-quotas.PNG deleted file mode 100644 index df5a11e98..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/project-quotas/set-project-quotas.PNG and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/edit-existing-user.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/edit-existing-user.png deleted file mode 100644 index 2d292770f..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/edit-existing-user.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-details.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-details.png deleted file mode 100644 index 1705b3938..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-details.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-list.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-list.png deleted file mode 100644 index a9b3f8c67..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/role-and-member-management/role-list.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/confirm-upload-3.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/confirm-upload-3.png deleted file mode 100644 index 
82f5e920b..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/confirm-upload-3.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/draft-app-5.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/draft-app-5.png deleted file mode 100644 index 7993c33fe..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/draft-app-5.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-app-template-1.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-app-template-1.png deleted file mode 100644 index 6e89ad1e3..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-app-template-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-helm-2.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-helm-2.png deleted file mode 100644 index 646aa74a3..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-helm-2.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-icon-4.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-icon-4.png deleted file mode 100644 index 23db60dfa..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/upload-icon-4.png and /dev/null differ diff --git 
a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/version-page-6.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/version-page-6.png deleted file mode 100644 index a6d3c455d..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/upload-helm-based-applications/version-page-6.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-network-isolation/workspace-isolation-page.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-network-isolation/workspace-isolation-page.png deleted file mode 100644 index f50fb1856..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-network-isolation/workspace-isolation-page.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/delete_workspace.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/delete_workspace.png deleted file mode 100644 index 747c4ed71..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/delete_workspace.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/set-workspace-info.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/set-workspace-info.png deleted file mode 100644 index 23f413882..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/set-workspace-info.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-created.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-created.png deleted file mode 100644 
index 0091d93e1..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-created.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-list-1.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-list-1.png deleted file mode 100644 index b02bd70e8..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-list-1.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-overview-4.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-overview-4.png deleted file mode 100644 index 7da8a9e8a..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-overview/workspace-overview-4.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/edit-workspace-quotas.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/edit-workspace-quotas.png deleted file mode 100644 index 821e22562..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/edit-workspace-quotas.png and /dev/null differ diff --git a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/slider.png b/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/slider.png deleted file mode 100644 index 883679930..000000000 Binary files a/static/images/docs/zh-cn/workspace-administration-and-user-guide/workspace-quotas/slider.png and /dev/null differ diff --git a/static/images/learn/actions-download.svg b/static/images/learn/actions-download.svg new file mode 100644 index 000000000..1cc896f94 --- /dev/null +++ 
b/static/images/learn/actions-download.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/actions-picture-active.svg b/static/images/learn/actions-picture-active.svg new file mode 100644 index 000000000..69152f7be --- /dev/null +++ b/static/images/learn/actions-picture-active.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/static/images/learn/actions-picture-open.svg b/static/images/learn/actions-picture-open.svg new file mode 100644 index 000000000..706f75b6f --- /dev/null +++ b/static/images/learn/actions-picture-open.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/static/images/learn/actions-picture.svg b/static/images/learn/actions-picture.svg new file mode 100644 index 000000000..bc603dd0e --- /dev/null +++ b/static/images/learn/actions-picture.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/static/images/learn/actions-ppt.svg b/static/images/learn/actions-ppt.svg new file mode 100644 index 000000000..23f9ded5a --- /dev/null +++ b/static/images/learn/actions-ppt.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/guofeng.png b/static/images/learn/guofeng.png new file mode 100644 index 000000000..47c1e388f Binary files /dev/null and b/static/images/learn/guofeng.png differ diff --git a/static/images/learn/icon-setion-close.svg b/static/images/learn/icon-setion-close.svg new file mode 100644 index 000000000..4bc7ae8f1 --- /dev/null +++ b/static/images/learn/icon-setion-close.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/icon-setion-open.svg b/static/images/learn/icon-setion-open.svg new file mode 100644 index 000000000..9d06ef23c --- /dev/null +++ b/static/images/learn/icon-setion-open.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/juntao.png b/static/images/learn/juntao.png new file mode 100644 index 000000000..39bb40310 Binary files /dev/null and b/static/images/learn/juntao.png differ diff --git a/static/images/learn/leify.jpeg b/static/images/learn/leify.jpeg new file mode 100644 index 000000000..1e44c1602 Binary files /dev/null and 
b/static/images/learn/leify.jpeg differ diff --git a/static/images/learn/leiwj.png b/static/images/learn/leiwj.png new file mode 100644 index 000000000..a027d4032 Binary files /dev/null and b/static/images/learn/leiwj.png differ diff --git a/static/images/learn/lesson-video-hover.svg b/static/images/learn/lesson-video-hover.svg new file mode 100644 index 000000000..7509f1b23 --- /dev/null +++ b/static/images/learn/lesson-video-hover.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/lesson-video-play.svg b/static/images/learn/lesson-video-play.svg new file mode 100644 index 000000000..1e76639b9 --- /dev/null +++ b/static/images/learn/lesson-video-play.svg @@ -0,0 +1,17 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/static/images/learn/lesson-video.svg b/static/images/learn/lesson-video.svg new file mode 100644 index 000000000..951bae21e --- /dev/null +++ b/static/images/learn/lesson-video.svg @@ -0,0 +1,3 @@ + + + diff --git a/static/images/learn/lihui.png b/static/images/learn/lihui.png new file mode 100644 index 000000000..749287a76 Binary files /dev/null and b/static/images/learn/lihui.png differ diff --git a/static/images/learn/mayan.png b/static/images/learn/mayan.png new file mode 100644 index 000000000..3c0573de1 Binary files /dev/null and b/static/images/learn/mayan.png differ diff --git a/static/images/learn/shizg.png b/static/images/learn/shizg.png new file mode 100644 index 000000000..868131dba Binary files /dev/null and b/static/images/learn/shizg.png differ diff --git a/static/images/learn/zhangliang.png b/static/images/learn/zhangliang.png new file mode 100644 index 000000000..5a4f8069f Binary files /dev/null and b/static/images/learn/zhangliang.png differ diff --git a/static/images/learn/zhuhan.png b/static/images/learn/zhuhan.png new file mode 100644 index 000000000..c7165ac0f Binary files /dev/null and b/static/images/learn/zhuhan.png differ diff --git a/static/images/live/arrow-hover.svg 
b/static/images/live/arrow-hover.svg new file mode 100644 index 000000000..669f02027 --- /dev/null +++ b/static/images/live/arrow-hover.svg @@ -0,0 +1,4 @@ + + + + diff --git a/static/images/live/arrow.svg b/static/images/live/arrow.svg new file mode 100644 index 000000000..a95ce13ed --- /dev/null +++ b/static/images/live/arrow.svg @@ -0,0 +1,4 @@ + + + + diff --git a/static/images/live/cloudnative-live-banner.png b/static/images/live/cloudnative-live-banner.png new file mode 100644 index 000000000..8426c6b9b Binary files /dev/null and b/static/images/live/cloudnative-live-banner.png differ diff --git a/static/images/live/cloudnative-live-cover.png b/static/images/live/cloudnative-live-cover.png deleted file mode 100644 index 1ef38b81c..000000000 Binary files a/static/images/live/cloudnative-live-cover.png and /dev/null differ diff --git a/static/images/news/kcd-china/guest-speakers.png b/static/images/news/kcd-china/guest-speakers.png new file mode 100644 index 000000000..cf1af8c9e Binary files /dev/null and b/static/images/news/kcd-china/guest-speakers.png differ diff --git a/static/images/news/kcd-china/kcd-beijing-organizers.png b/static/images/news/kcd-china/kcd-beijing-organizers.png new file mode 100644 index 000000000..bdbd2846c Binary files /dev/null and b/static/images/news/kcd-china/kcd-beijing-organizers.png differ diff --git a/static/images/news/kcd-china/kcd-china-event.png b/static/images/news/kcd-china/kcd-china-event.png new file mode 100644 index 000000000..0acb31ad8 Binary files /dev/null and b/static/images/news/kcd-china/kcd-china-event.png differ diff --git a/static/images/news/kcd-china/kcd-event-2.png b/static/images/news/kcd-china/kcd-event-2.png new file mode 100644 index 000000000..5ddf0526b Binary files /dev/null and b/static/images/news/kcd-china/kcd-event-2.png differ diff --git a/static/images/news/kcd-china/kcd-event-3.png b/static/images/news/kcd-china/kcd-event-3.png new file mode 100644 index 000000000..a237ea8df Binary files 
/dev/null and b/static/images/news/kcd-china/kcd-event-3.png differ diff --git a/static/images/news/kcd-china/kcd-event.png b/static/images/news/kcd-china/kcd-event.png new file mode 100644 index 000000000..8453daca4 Binary files /dev/null and b/static/images/news/kcd-china/kcd-event.png differ diff --git a/static/images/news/kubecon-china-2021/aws-interview.png b/static/images/news/kubecon-china-2021/aws-interview.png new file mode 100644 index 000000000..510a3cbc3 Binary files /dev/null and b/static/images/news/kubecon-china-2021/aws-interview.png differ diff --git a/static/images/news/kubecon-china-2021/banner.png b/static/images/news/kubecon-china-2021/banner.png new file mode 100644 index 000000000..93eb2eca0 Binary files /dev/null and b/static/images/news/kubecon-china-2021/banner.png differ diff --git a/static/images/news/kubecon-china-2021/huo-binjie.png b/static/images/news/kubecon-china-2021/huo-binjie.png new file mode 100644 index 000000000..a16c6e7c0 Binary files /dev/null and b/static/images/news/kubecon-china-2021/huo-binjie.png differ diff --git a/static/images/news/kubecon-china-2021/lai-zhengyi.png b/static/images/news/kubecon-china-2021/lai-zhengyi.png new file mode 100644 index 000000000..c973724dd Binary files /dev/null and b/static/images/news/kubecon-china-2021/lai-zhengyi.png differ diff --git a/static/images/news/kubecon-china-2021/lei-wanjun.png b/static/images/news/kubecon-china-2021/lei-wanjun.png new file mode 100644 index 000000000..f68313526 Binary files /dev/null and b/static/images/news/kubecon-china-2021/lei-wanjun.png differ diff --git a/static/images/news/kubecon-china-2021/openebs-office-hours.png b/static/images/news/kubecon-china-2021/openebs-office-hours.png new file mode 100644 index 000000000..913a01e57 Binary files /dev/null and b/static/images/news/kubecon-china-2021/openebs-office-hours.png differ diff --git a/static/images/news/kubecon-china-2021/wan-hongming.png b/static/images/news/kubecon-china-2021/wan-hongming.png 
new file mode 100644 index 000000000..70633fc56 Binary files /dev/null and b/static/images/news/kubecon-china-2021/wan-hongming.png differ diff --git a/static/json/kubesphere-3.1.json b/static/json/kubesphere-3.1.json index 0b0a2356b..25ab50e71 100644 --- a/static/json/kubesphere-3.1.json +++ b/static/json/kubesphere-3.1.json @@ -18176,7 +18176,7 @@ } }, "v1.GlusterfsVolumeSource": { - "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "description": "Represents a GlusterFS mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", "required": [ "endpoints", "path" diff --git a/static/swiper/swiper-bundle.min.css b/static/swiper/swiper-bundle.min.css new file mode 100644 index 000000000..531c09aff --- /dev/null +++ b/static/swiper/swiper-bundle.min.css @@ -0,0 +1,13 @@ +/** + * Swiper 7.2.0 + * Most modern mobile touch slider and framework with hardware accelerated transitions + * https://swiperjs.com + * + * Copyright 2014-2021 Vladimir Kharlampidi + * + * Released under the MIT License + * + * Released on: October 27, 2021 + */ + +@font-face{font-family:swiper-icons;src:url('data:application/font-woff;charset=utf-8;base64, 
d09GRgABAAAAAAZgABAAAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABGRlRNAAAGRAAAABoAAAAci6qHkUdERUYAAAWgAAAAIwAAACQAYABXR1BPUwAABhQAAAAuAAAANuAY7+xHU1VCAAAFxAAAAFAAAABm2fPczU9TLzIAAAHcAAAASgAAAGBP9V5RY21hcAAAAkQAAACIAAABYt6F0cBjdnQgAAACzAAAAAQAAAAEABEBRGdhc3AAAAWYAAAACAAAAAj//wADZ2x5ZgAAAywAAADMAAAD2MHtryVoZWFkAAABbAAAADAAAAA2E2+eoWhoZWEAAAGcAAAAHwAAACQC9gDzaG10eAAAAigAAAAZAAAArgJkABFsb2NhAAAC0AAAAFoAAABaFQAUGG1heHAAAAG8AAAAHwAAACAAcABAbmFtZQAAA/gAAAE5AAACXvFdBwlwb3N0AAAFNAAAAGIAAACE5s74hXjaY2BkYGAAYpf5Hu/j+W2+MnAzMYDAzaX6QjD6/4//Bxj5GA8AuRwMYGkAPywL13jaY2BkYGA88P8Agx4j+/8fQDYfA1AEBWgDAIB2BOoAeNpjYGRgYNBh4GdgYgABEMnIABJzYNADCQAACWgAsQB42mNgYfzCOIGBlYGB0YcxjYGBwR1Kf2WQZGhhYGBiYGVmgAFGBiQQkOaawtDAoMBQxXjg/wEGPcYDDA4wNUA2CCgwsAAAO4EL6gAAeNpj2M0gyAACqxgGNWBkZ2D4/wMA+xkDdgAAAHjaY2BgYGaAYBkGRgYQiAHyGMF8FgYHIM3DwMHABGQrMOgyWDLEM1T9/w8UBfEMgLzE////P/5//f/V/xv+r4eaAAeMbAxwIUYmIMHEgKYAYjUcsDAwsLKxc3BycfPw8jEQA/gZBASFhEVExcQlJKWkZWTl5BUUlZRVVNXUNTQZBgMAAMR+E+gAEQFEAAAAKgAqACoANAA+AEgAUgBcAGYAcAB6AIQAjgCYAKIArAC2AMAAygDUAN4A6ADyAPwBBgEQARoBJAEuATgBQgFMAVYBYAFqAXQBfgGIAZIBnAGmAbIBzgHsAAB42u2NMQ6CUAyGW568x9AneYYgm4MJbhKFaExIOAVX8ApewSt4Bic4AfeAid3VOBixDxfPYEza5O+Xfi04YADggiUIULCuEJK8VhO4bSvpdnktHI5QCYtdi2sl8ZnXaHlqUrNKzdKcT8cjlq+rwZSvIVczNiezsfnP/uznmfPFBNODM2K7MTQ45YEAZqGP81AmGGcF3iPqOop0r1SPTaTbVkfUe4HXj97wYE+yNwWYxwWu4v1ugWHgo3S1XdZEVqWM7ET0cfnLGxWfkgR42o2PvWrDMBSFj/IHLaF0zKjRgdiVMwScNRAoWUoH78Y2icB/yIY09An6AH2Bdu/UB+yxopYshQiEvnvu0dURgDt8QeC8PDw7Fpji3fEA4z/PEJ6YOB5hKh4dj3EvXhxPqH/SKUY3rJ7srZ4FZnh1PMAtPhwP6fl2PMJMPDgeQ4rY8YT6Gzao0eAEA409DuggmTnFnOcSCiEiLMgxCiTI6Cq5DZUd3Qmp10vO0LaLTd2cjN4fOumlc7lUYbSQcZFkutRG7g6JKZKy0RmdLY680CDnEJ+UMkpFFe1RN7nxdVpXrC4aTtnaurOnYercZg2YVmLN/d/gczfEimrE/fs/bOuq29Zmn8tloORaXgZgGa78yO9/cnXm2BpaGvq25Dv9S4E9+5SIc9PqupJKhYFSSl47+Qcr1mYNAAAAeNptw0cKwkAAAMDZJA8Q7OUJvkLsPfZ6zFVERPy8qHh2YER+3i/BP83vIBLLySsoKimrqKqpa2hp6+jq6RsYGhmbmJqZSy0sraxtbO3sHRydnEMU4uR6yx7JJXveP7WrDycAAAAAAAH//wACeNpjYGRgYOABYhkgZgJCZgZNBkYGLQZtIJsFLMYAAAw3ALgAeNolizEKgDAQBCchRbC2sFER0YD6qVQi
BCv/H9ezGI6Z5XBAw8CBK/m5iQQVauVbXLnOrMZv2oLdKFa8Pjuru2hJzGabmOSLzNMzvutpB3N42mNgZGBg4GKQYzBhYMxJLMlj4GBgAYow/P/PAJJhLM6sSoWKfWCAAwDAjgbRAAB42mNgYGBkAIIbCZo5IPrmUn0hGA0AO8EFTQAA');font-weight:400;font-style:normal}:root{--swiper-theme-color:#007aff}.swiper{margin-left:auto;margin-right:auto;position:relative;overflow:hidden;list-style:none;padding:0;z-index:1}.swiper-vertical>.swiper-wrapper{flex-direction:column}.swiper-wrapper{position:relative;width:100%;height:100%;z-index:1;display:flex;transition-property:transform;box-sizing:content-box}.swiper-android .swiper-slide,.swiper-wrapper{transform:translate3d(0px,0,0)}.swiper-pointer-events{touch-action:pan-y}.swiper-pointer-events.swiper-vertical{touch-action:pan-x}.swiper-slide{flex-shrink:0;width:100%;height:100%;position:relative;transition-property:transform}.swiper-slide-invisible-blank{visibility:hidden}.swiper-autoheight,.swiper-autoheight .swiper-slide{height:auto}.swiper-autoheight .swiper-wrapper{align-items:flex-start;transition-property:transform,height}.swiper-3d,.swiper-3d.swiper-css-mode .swiper-wrapper{perspective:1200px}.swiper-3d .swiper-cube-shadow,.swiper-3d .swiper-slide,.swiper-3d .swiper-slide-shadow,.swiper-3d .swiper-slide-shadow-bottom,.swiper-3d .swiper-slide-shadow-left,.swiper-3d .swiper-slide-shadow-right,.swiper-3d .swiper-slide-shadow-top,.swiper-3d .swiper-wrapper{transform-style:preserve-3d}.swiper-3d .swiper-slide-shadow,.swiper-3d .swiper-slide-shadow-bottom,.swiper-3d .swiper-slide-shadow-left,.swiper-3d .swiper-slide-shadow-right,.swiper-3d .swiper-slide-shadow-top{position:absolute;left:0;top:0;width:100%;height:100%;pointer-events:none;z-index:10}.swiper-3d .swiper-slide-shadow{background:rgba(0,0,0,.15)}.swiper-3d .swiper-slide-shadow-left{background-image:linear-gradient(to left,rgba(0,0,0,.5),rgba(0,0,0,0))}.swiper-3d .swiper-slide-shadow-right{background-image:linear-gradient(to right,rgba(0,0,0,.5),rgba(0,0,0,0))}.swiper-3d 
.swiper-slide-shadow-top{background-image:linear-gradient(to top,rgba(0,0,0,.5),rgba(0,0,0,0))}.swiper-3d .swiper-slide-shadow-bottom{background-image:linear-gradient(to bottom,rgba(0,0,0,.5),rgba(0,0,0,0))}.swiper-css-mode>.swiper-wrapper{overflow:auto;scrollbar-width:none;-ms-overflow-style:none}.swiper-css-mode>.swiper-wrapper::-webkit-scrollbar{display:none}.swiper-css-mode>.swiper-wrapper>.swiper-slide{scroll-snap-align:start start}.swiper-horizontal.swiper-css-mode>.swiper-wrapper{scroll-snap-type:x mandatory}.swiper-vertical.swiper-css-mode>.swiper-wrapper{scroll-snap-type:y mandatory}.swiper-centered>.swiper-wrapper::before{content:'';flex-shrink:0;order:9999}.swiper-centered.swiper-horizontal>.swiper-wrapper>.swiper-slide:first-child{margin-inline-start:var(--swiper-centered-offset-before)}.swiper-centered.swiper-horizontal>.swiper-wrapper::before{height:100%;min-height:1px;width:var(--swiper-centered-offset-after)}.swiper-centered.swiper-vertical>.swiper-wrapper>.swiper-slide:first-child{margin-block-start:var(--swiper-centered-offset-before)}.swiper-centered.swiper-vertical>.swiper-wrapper::before{width:100%;min-width:1px;height:var(--swiper-centered-offset-after)}.swiper-centered>.swiper-wrapper>.swiper-slide{scroll-snap-align:center center}.swiper-virtual.swiper-css-mode .swiper-wrapper::after{content:'';position:absolute;left:0;top:0;pointer-events:none}.swiper-virtual.swiper-css-mode.swiper-horizontal .swiper-wrapper::after{height:1px;width:var(--swiper-virtual-size)}.swiper-virtual.swiper-css-mode.swiper-vertical .swiper-wrapper::after{width:1px;height:var(--swiper-virtual-size)}:root{--swiper-navigation-size:44px}.swiper-button-next,.swiper-button-prev{position:absolute;top:50%;width:calc(var(--swiper-navigation-size)/ 44 * 27);height:var(--swiper-navigation-size);margin-top:calc(0px - (var(--swiper-navigation-size)/ 
2));z-index:10;cursor:pointer;display:flex;align-items:center;justify-content:center;color:var(--swiper-navigation-color,var(--swiper-theme-color))}.swiper-button-next.swiper-button-disabled,.swiper-button-prev.swiper-button-disabled{opacity:.35;cursor:auto;pointer-events:none}.swiper-button-next:after,.swiper-button-prev:after{font-family:swiper-icons;font-size:var(--swiper-navigation-size);text-transform:none!important;letter-spacing:0;text-transform:none;font-variant:initial;line-height:1}.swiper-button-prev,.swiper-rtl .swiper-button-next{left:10px;right:auto}.swiper-button-prev:after,.swiper-rtl .swiper-button-next:after{content:'prev'}.swiper-button-next,.swiper-rtl .swiper-button-prev{right:10px;left:auto}.swiper-button-next:after,.swiper-rtl .swiper-button-prev:after{content:'next'}.swiper-button-lock{display:none}.swiper-pagination{position:absolute;text-align:center;transition:.3s opacity;transform:translate3d(0,0,0);z-index:10}.swiper-pagination.swiper-pagination-hidden{opacity:0}.swiper-horizontal>.swiper-pagination-bullets,.swiper-pagination-bullets.swiper-pagination-horizontal,.swiper-pagination-custom,.swiper-pagination-fraction{bottom:10px;left:0;width:100%}.swiper-pagination-bullets-dynamic{overflow:hidden;font-size:0}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet{transform:scale(.33);position:relative}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet-active{transform:scale(1)}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet-active-main{transform:scale(1)}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet-active-prev{transform:scale(.66)}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet-active-prev-prev{transform:scale(.33)}.swiper-pagination-bullets-dynamic .swiper-pagination-bullet-active-next{transform:scale(.66)}.swiper-pagination-bullets-dynamic 
.swiper-pagination-bullet-active-next-next{transform:scale(.33)}.swiper-pagination-bullet{width:var(--swiper-pagination-bullet-width,var(--swiper-pagination-bullet-size,8px));height:var(--swiper-pagination-bullet-height,var(--swiper-pagination-bullet-size,8px));display:inline-block;border-radius:50%;background:var(--swiper-pagination-bullet-inactive-color,#000);opacity:var(--swiper-pagination-bullet-inactive-opacity, .2)}button.swiper-pagination-bullet{border:none;margin:0;padding:0;box-shadow:none;-webkit-appearance:none;appearance:none}.swiper-pagination-clickable .swiper-pagination-bullet{cursor:pointer}.swiper-pagination-bullet:only-child{display:none!important}.swiper-pagination-bullet-active{opacity:var(--swiper-pagination-bullet-opacity, 1);background:var(--swiper-pagination-color,var(--swiper-theme-color))}.swiper-pagination-vertical.swiper-pagination-bullets,.swiper-vertical>.swiper-pagination-bullets{right:10px;top:50%;transform:translate3d(0px,-50%,0)}.swiper-pagination-vertical.swiper-pagination-bullets .swiper-pagination-bullet,.swiper-vertical>.swiper-pagination-bullets .swiper-pagination-bullet{margin:var(--swiper-pagination-bullet-vertical-gap,6px) 0;display:block}.swiper-pagination-vertical.swiper-pagination-bullets.swiper-pagination-bullets-dynamic,.swiper-vertical>.swiper-pagination-bullets.swiper-pagination-bullets-dynamic{top:50%;transform:translateY(-50%);width:8px}.swiper-pagination-vertical.swiper-pagination-bullets.swiper-pagination-bullets-dynamic .swiper-pagination-bullet,.swiper-vertical>.swiper-pagination-bullets.swiper-pagination-bullets-dynamic .swiper-pagination-bullet{display:inline-block;transition:.2s transform,.2s top}.swiper-horizontal>.swiper-pagination-bullets .swiper-pagination-bullet,.swiper-pagination-horizontal.swiper-pagination-bullets .swiper-pagination-bullet{margin:0 
var(--swiper-pagination-bullet-horizontal-gap,4px)}.swiper-horizontal>.swiper-pagination-bullets.swiper-pagination-bullets-dynamic,.swiper-pagination-horizontal.swiper-pagination-bullets.swiper-pagination-bullets-dynamic{left:50%;transform:translateX(-50%);white-space:nowrap}.swiper-horizontal>.swiper-pagination-bullets.swiper-pagination-bullets-dynamic .swiper-pagination-bullet,.swiper-pagination-horizontal.swiper-pagination-bullets.swiper-pagination-bullets-dynamic .swiper-pagination-bullet{transition:.2s transform,.2s left}.swiper-horizontal.swiper-rtl>.swiper-pagination-bullets-dynamic .swiper-pagination-bullet{transition:.2s transform,.2s right}.swiper-pagination-progressbar{background:rgba(0,0,0,.25);position:absolute}.swiper-pagination-progressbar .swiper-pagination-progressbar-fill{background:var(--swiper-pagination-color,var(--swiper-theme-color));position:absolute;left:0;top:0;width:100%;height:100%;transform:scale(0);transform-origin:left top}.swiper-rtl .swiper-pagination-progressbar .swiper-pagination-progressbar-fill{transform-origin:right 
top}.swiper-horizontal>.swiper-pagination-progressbar,.swiper-pagination-progressbar.swiper-pagination-horizontal,.swiper-pagination-progressbar.swiper-pagination-vertical.swiper-pagination-progressbar-opposite,.swiper-vertical>.swiper-pagination-progressbar.swiper-pagination-progressbar-opposite{width:100%;height:4px;left:0;top:0}.swiper-horizontal>.swiper-pagination-progressbar.swiper-pagination-progressbar-opposite,.swiper-pagination-progressbar.swiper-pagination-horizontal.swiper-pagination-progressbar-opposite,.swiper-pagination-progressbar.swiper-pagination-vertical,.swiper-vertical>.swiper-pagination-progressbar{width:4px;height:100%;left:0;top:0}.swiper-pagination-lock{display:none}.swiper-scrollbar{border-radius:10px;position:relative;-ms-touch-action:none;background:rgba(0,0,0,.1)}.swiper-horizontal>.swiper-scrollbar{position:absolute;left:1%;bottom:3px;z-index:50;height:5px;width:98%}.swiper-vertical>.swiper-scrollbar{position:absolute;right:3px;top:1%;z-index:50;width:5px;height:98%}.swiper-scrollbar-drag{height:100%;width:100%;position:relative;background:rgba(0,0,0,.5);border-radius:10px;left:0;top:0}.swiper-scrollbar-cursor-drag{cursor:move}.swiper-scrollbar-lock{display:none}.swiper-zoom-container{width:100%;height:100%;display:flex;justify-content:center;align-items:center;text-align:center}.swiper-zoom-container>canvas,.swiper-zoom-container>img,.swiper-zoom-container>svg{max-width:100%;max-height:100%;object-fit:contain}.swiper-slide-zoomed{cursor:move}.swiper-lazy-preloader{width:42px;height:42px;position:absolute;left:50%;top:50%;margin-left:-21px;margin-top:-21px;z-index:10;transform-origin:50%;animation:swiper-preloader-spin 1s infinite linear;box-sizing:border-box;border:4px solid var(--swiper-preloader-color,var(--swiper-theme-color));border-radius:50%;border-top-color:transparent}.swiper-lazy-preloader-white{--swiper-preloader-color:#fff}.swiper-lazy-preloader-black{--swiper-preloader-color:#000}@keyframes 
swiper-preloader-spin{100%{transform:rotate(360deg)}}.swiper .swiper-notification{position:absolute;left:0;top:0;pointer-events:none;opacity:0;z-index:-1000}.swiper-free-mode>.swiper-wrapper{transition-timing-function:ease-out;margin:0 auto}.swiper-grid>.swiper-wrapper{flex-wrap:wrap}.swiper-grid-column>.swiper-wrapper{flex-wrap:wrap;flex-direction:column}.swiper-fade.swiper-free-mode .swiper-slide{transition-timing-function:ease-out}.swiper-fade .swiper-slide{pointer-events:none;transition-property:opacity}.swiper-fade .swiper-slide .swiper-slide{pointer-events:none}.swiper-fade .swiper-slide-active,.swiper-fade .swiper-slide-active .swiper-slide-active{pointer-events:auto}.swiper-cube{overflow:visible}.swiper-cube .swiper-slide{pointer-events:none;-webkit-backface-visibility:hidden;backface-visibility:hidden;z-index:1;visibility:hidden;transform-origin:0 0;width:100%;height:100%}.swiper-cube .swiper-slide .swiper-slide{pointer-events:none}.swiper-cube.swiper-rtl .swiper-slide{transform-origin:100% 0}.swiper-cube .swiper-slide-active,.swiper-cube .swiper-slide-active .swiper-slide-active{pointer-events:auto}.swiper-cube .swiper-slide-active,.swiper-cube .swiper-slide-next,.swiper-cube .swiper-slide-next+.swiper-slide,.swiper-cube .swiper-slide-prev{pointer-events:auto;visibility:visible}.swiper-cube .swiper-slide-shadow-bottom,.swiper-cube .swiper-slide-shadow-left,.swiper-cube .swiper-slide-shadow-right,.swiper-cube .swiper-slide-shadow-top{z-index:0;-webkit-backface-visibility:hidden;backface-visibility:hidden}.swiper-cube .swiper-cube-shadow{position:absolute;left:0;bottom:0px;width:100%;height:100%;opacity:.6;z-index:0}.swiper-cube .swiper-cube-shadow:before{content:'';background:#000;position:absolute;left:0;top:0;bottom:0;right:0;filter:blur(50px)}.swiper-flip{overflow:visible}.swiper-flip .swiper-slide{pointer-events:none;-webkit-backface-visibility:hidden;backface-visibility:hidden;z-index:1}.swiper-flip .swiper-slide 
.swiper-slide{pointer-events:none}.swiper-flip .swiper-slide-active,.swiper-flip .swiper-slide-active .swiper-slide-active{pointer-events:auto}.swiper-flip .swiper-slide-shadow-bottom,.swiper-flip .swiper-slide-shadow-left,.swiper-flip .swiper-slide-shadow-right,.swiper-flip .swiper-slide-shadow-top{z-index:0;-webkit-backface-visibility:hidden;backface-visibility:hidden}.swiper-creative .swiper-slide{-webkit-backface-visibility:hidden;backface-visibility:hidden;overflow:hidden;transition-property:transform,opacity,height}.swiper-cards{overflow:visible}.swiper-cards .swiper-slide{transform-origin:center bottom;-webkit-backface-visibility:hidden;backface-visibility:hidden;overflow:hidden} \ No newline at end of file diff --git a/static/swiper/swiper-bundle.min.js b/static/swiper/swiper-bundle.min.js new file mode 100644 index 000000000..3a0b44676 --- /dev/null +++ b/static/swiper/swiper-bundle.min.js @@ -0,0 +1,14 @@ +/** + * Swiper 7.2.0 + * Most modern mobile touch slider and framework with hardware accelerated transitions + * https://swiperjs.com + * + * Copyright 2014-2021 Vladimir Kharlampidi + * + * Released under the MIT License + * + * Released on: October 27, 2021 + */ + +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).Swiper=t()}(this,(function(){"use strict";function e(e){return null!==e&&"object"==typeof e&&"constructor"in e&&e.constructor===Object}function t(s={},a={}){Object.keys(a).forEach((i=>{void 0===s[i]?s[i]=a[i]:e(a[i])&&e(s[i])&&Object.keys(a[i]).length>0&&t(s[i],a[i])}))}const 
s={body:{},addEventListener(){},removeEventListener(){},activeElement:{blur(){},nodeName:""},querySelector:()=>null,querySelectorAll:()=>[],getElementById:()=>null,createEvent:()=>({initEvent(){}}),createElement:()=>({children:[],childNodes:[],style:{},setAttribute(){},getElementsByTagName:()=>[]}),createElementNS:()=>({}),importNode:()=>null,location:{hash:"",host:"",hostname:"",href:"",origin:"",pathname:"",protocol:"",search:""}};function a(){const e="undefined"!=typeof document?document:{};return t(e,s),e}const i={document:s,navigator:{userAgent:""},location:{hash:"",host:"",hostname:"",href:"",origin:"",pathname:"",protocol:"",search:""},history:{replaceState(){},pushState(){},go(){},back(){}},CustomEvent:function(){return this},addEventListener(){},removeEventListener(){},getComputedStyle:()=>({getPropertyValue:()=>""}),Image(){},Date(){},screen:{},setTimeout(){},clearTimeout(){},matchMedia:()=>({}),requestAnimationFrame:e=>"undefined"==typeof setTimeout?(e(),null):setTimeout(e,0),cancelAnimationFrame(e){"undefined"!=typeof setTimeout&&clearTimeout(e)}};function r(){const e="undefined"!=typeof window?window:{};return t(e,i),e}class n extends Array{constructor(e){super(...e||[]),function(e){const t=e.__proto__;Object.defineProperty(e,"__proto__",{get:()=>t,set(e){t.__proto__=e}})}(this)}}function l(e=[]){const t=[];return e.forEach((e=>{Array.isArray(e)?t.push(...l(e)):t.push(e)})),t}function o(e,t){return Array.prototype.filter.call(e,t)}function d(e,t){const s=r(),i=a();let l=[];if(!t&&e instanceof n)return e;if(!e)return new n(l);if("string"==typeof e){const s=e.trim();if(s.indexOf("<")>=0&&s.indexOf(">")>=0){let e="div";0===s.indexOf("e.split(" "))));return this.forEach((e=>{e.classList.add(...t)})),this},removeClass:function(...e){const t=l(e.map((e=>e.split(" "))));return this.forEach((e=>{e.classList.remove(...t)})),this},hasClass:function(...e){const t=l(e.map((e=>e.split(" "))));return 
o(this,(e=>t.filter((t=>e.classList.contains(t))).length>0)).length>0},toggleClass:function(...e){const t=l(e.map((e=>e.split(" "))));this.forEach((e=>{t.forEach((t=>{e.classList.toggle(t)}))}))},attr:function(e,t){if(1===arguments.length&&"string"==typeof e)return this[0]?this[0].getAttribute(e):void 0;for(let s=0;s=0;e-=1){const s=n[e];a&&s.listener===a||a&&s.listener&&s.listener.dom7proxy&&s.listener.dom7proxy===a?(r.removeEventListener(t,s.proxyListener,i),n.splice(e,1)):a||(r.removeEventListener(t,s.proxyListener,i),n.splice(e,1))}}}return this},trigger:function(...e){const t=r(),s=e[0].split(" "),a=e[1];for(let i=0;it>0)),i.dispatchEvent(s),i.dom7EventData=[],delete i.dom7EventData}}}return this},transitionEnd:function(e){const t=this;return e&&t.on("transitionend",(function s(a){a.target===this&&(e.call(this,a),t.off("transitionend",s))})),this},outerWidth:function(e){if(this.length>0){if(e){const e=this.styles();return this[0].offsetWidth+parseFloat(e.getPropertyValue("margin-right"))+parseFloat(e.getPropertyValue("margin-left"))}return this[0].offsetWidth}return null},outerHeight:function(e){if(this.length>0){if(e){const e=this.styles();return this[0].offsetHeight+parseFloat(e.getPropertyValue("margin-top"))+parseFloat(e.getPropertyValue("margin-bottom"))}return this[0].offsetHeight}return null},styles:function(){const e=r();return this[0]?e.getComputedStyle(this[0],null):{}},offset:function(){if(this.length>0){const e=r(),t=a(),s=this[0],i=s.getBoundingClientRect(),n=t.body,l=s.clientTop||n.clientTop||0,o=s.clientLeft||n.clientLeft||0,d=s===e?e.scrollY:s.scrollTop,c=s===e?e.scrollX:s.scrollLeft;return{top:i.top+d-l,left:i.left+c-o}}return null},css:function(e,t){const s=r();let a;if(1===arguments.length){if("string"!=typeof e){for(a=0;a{e.apply(t,[t,s])})),this):this},html:function(e){if(void 0===e)return this[0]?this[0].innerHTML:null;for(let t=0;tt-1)return d([]);if(e<0){const s=t+e;return d(s<0?[]:[this[s]])}return 
d([this[e]])},append:function(...e){let t;const s=a();for(let a=0;a=0;i-=1)this[s].insertBefore(a.childNodes[i],this[s].childNodes[0])}else if(e instanceof n)for(i=0;i0?e?this[0].nextElementSibling&&d(this[0].nextElementSibling).is(e)?d([this[0].nextElementSibling]):d([]):this[0].nextElementSibling?d([this[0].nextElementSibling]):d([]):d([])},nextAll:function(e){const t=[];let s=this[0];if(!s)return d([]);for(;s.nextElementSibling;){const a=s.nextElementSibling;e?d(a).is(e)&&t.push(a):t.push(a),s=a}return d(t)},prev:function(e){if(this.length>0){const t=this[0];return e?t.previousElementSibling&&d(t.previousElementSibling).is(e)?d([t.previousElementSibling]):d([]):t.previousElementSibling?d([t.previousElementSibling]):d([])}return d([])},prevAll:function(e){const t=[];let s=this[0];if(!s)return d([]);for(;s.previousElementSibling;){const a=s.previousElementSibling;e?d(a).is(e)&&t.push(a):t.push(a),s=a}return d(t)},parent:function(e){const t=[];for(let s=0;s6&&(i=i.split(", ").map((e=>e.replace(",","."))).join(", ")),n=new s.WebKitCSSMatrix("none"===i?"":i)):(n=l.MozTransform||l.OTransform||l.MsTransform||l.msTransform||l.transform||l.getPropertyValue("transform").replace("translate(","matrix(1, 0, 0, 1,"),a=n.toString().split(",")),"x"===t&&(i=s.WebKitCSSMatrix?n.m41:16===a.length?parseFloat(a[12]):parseFloat(a[4])),"y"===t&&(i=s.WebKitCSSMatrix?n.m42:16===a.length?parseFloat(a[13]):parseFloat(a[5])),i||0}function m(e){return"object"==typeof e&&null!==e&&e.constructor&&"Object"===Object.prototype.toString.call(e).slice(8,-1)}function f(...e){const t=Object(e[0]),s=["__proto__","constructor","prototype"];for(let i=1;is.indexOf(e)<0));for(let s=0,a=e.length;si?"next":"prev",c=(e,t)=>"next"===d&&e>=t||"prev"===d&&e<=t,p=()=>{n=(new Date).getTime(),null===l&&(l=n);const r=Math.max(Math.min((n-l)/o,1),0),d=.5-Math.cos(r*Math.PI)/2;let u=i+d*(t-i);if(c(u,t)&&(u=t),e.wrapperEl.scrollTo({[s]:u}),c(u,t))return 
e.wrapperEl.style.overflow="hidden",e.wrapperEl.style.scrollSnapType="",setTimeout((()=>{e.wrapperEl.style.overflow="",e.wrapperEl.scrollTo({[s]:u})})),void a.cancelAnimationFrame(e.cssModeFrameID);e.cssModeFrameID=a.requestAnimationFrame(p)};p()}let w,b,x;function y(){return w||(w=function(){const e=r(),t=a();return{smoothScroll:t.documentElement&&"scrollBehavior"in t.documentElement.style,touch:!!("ontouchstart"in e||e.DocumentTouch&&t instanceof e.DocumentTouch),passiveListener:function(){let t=!1;try{const s=Object.defineProperty({},"passive",{get(){t=!0}});e.addEventListener("testPassiveListener",null,s)}catch(e){}return t}(),gestures:"ongesturestart"in e}}()),w}function E(e={}){return b||(b=function({userAgent:e}={}){const t=y(),s=r(),a=s.navigator.platform,i=e||s.navigator.userAgent,n={ios:!1,android:!1},l=s.screen.width,o=s.screen.height,d=i.match(/(Android);?[\s\/]+([\d.]+)?/);let c=i.match(/(iPad).*OS\s([\d_]+)/);const p=i.match(/(iPod)(.*OS\s([\d_]+))?/),u=!c&&i.match(/(iPhone\sOS|iOS)\s([\d_]+)/),h="Win32"===a;let m="MacIntel"===a;return!c&&m&&t.touch&&["1024x1366","1366x1024","834x1194","1194x834","834x1112","1112x834","768x1024","1024x768","820x1180","1180x820","810x1080","1080x810"].indexOf(`${l}x${o}`)>=0&&(c=i.match(/(Version)\/([\d.]+)/),c||(c=[0,1,"13_0_0"]),m=!1),d&&!h&&(n.os="android",n.android=!0),(c||u||p)&&(n.os="ios",n.ios=!0),n}(e)),b}function T(){return x||(x=function(){const e=r();return{isSafari:function(){const t=e.navigator.userAgent.toLowerCase();return t.indexOf("safari")>=0&&t.indexOf("chrome")<0&&t.indexOf("android")<0}(),isWebView:/(iPhone|iPod|iPad).*AppleWebKit(?!.*Safari)/i.test(e.navigator.userAgent)}}()),x}Object.keys(c).forEach((e=>{Object.defineProperty(d.fn,e,{value:c[e],writable:!0})}));var C={on(e,t,s){const a=this;if("function"!=typeof t)return a;const i=s?"unshift":"push";return e.split(" ").forEach((e=>{a.eventsListeners[e]||(a.eventsListeners[e]=[]),a.eventsListeners[e][i](t)})),a},once(e,t,s){const 
a=this;if("function"!=typeof t)return a;function i(...s){a.off(e,i),i.__emitterProxy&&delete i.__emitterProxy,t.apply(a,s)}return i.__emitterProxy=t,a.on(e,i,s)},onAny(e,t){const s=this;if("function"!=typeof e)return s;const a=t?"unshift":"push";return s.eventsAnyListeners.indexOf(e)<0&&s.eventsAnyListeners[a](e),s},offAny(e){const t=this;if(!t.eventsAnyListeners)return t;const s=t.eventsAnyListeners.indexOf(e);return s>=0&&t.eventsAnyListeners.splice(s,1),t},off(e,t){const s=this;return s.eventsListeners?(e.split(" ").forEach((e=>{void 0===t?s.eventsListeners[e]=[]:s.eventsListeners[e]&&s.eventsListeners[e].forEach(((a,i)=>{(a===t||a.__emitterProxy&&a.__emitterProxy===t)&&s.eventsListeners[e].splice(i,1)}))})),s):s},emit(...e){const t=this;if(!t.eventsListeners)return t;let s,a,i;"string"==typeof e[0]||Array.isArray(e[0])?(s=e[0],a=e.slice(1,e.length),i=t):(s=e[0].events,a=e[0].data,i=e[0].context||t),a.unshift(i);return(Array.isArray(s)?s:s.split(" ")).forEach((e=>{t.eventsAnyListeners&&t.eventsAnyListeners.length&&t.eventsAnyListeners.forEach((t=>{t.apply(i,[e,...a])})),t.eventsListeners&&t.eventsListeners[e]&&t.eventsListeners[e].forEach((e=>{e.apply(i,a)}))})),t}};function $({swiper:e,runCallbacks:t,direction:s,step:a}){const{activeIndex:i,previousIndex:r}=e;let n=s;if(n||(n=i>r?"next":i0)return;if(n.isTouched&&n.isMoved)return;!!l.noSwipingClass&&""!==l.noSwipingClass&&p.target&&p.target.shadowRoot&&e.path&&e.path[0]&&(h=d(e.path[0]));const m=l.noSwipingSelector?l.noSwipingSelector:`.${l.noSwipingClass}`,f=!(!p.target||!p.target.shadowRoot);if(l.noSwiping&&(f?function(e,t=this){return function t(s){return s&&s!==a()&&s!==r()?(s.assignedSlot&&(s=s.assignedSlot),s.closest(e)||t(s.getRootNode().host)):null}(t)}(m,p.target):h.closest(m)[0]))return void(t.allowClick=!0);if(l.swipeHandler&&!h.closest(l.swipeHandler)[0])return;o.currentX="touchstart"===p.type?p.targetTouches[0].pageX:p.pageX,o.currentY="touchstart"===p.type?p.targetTouches[0].pageY:p.pageY;const 
g=o.currentX,v=o.currentY,w=l.edgeSwipeDetection||l.iOSEdgeSwipeDetection,b=l.edgeSwipeThreshold||l.iOSEdgeSwipeThreshold;if(w&&(g<=b||g>=i.innerWidth-b)){if("prevent"!==w)return;e.preventDefault()}if(Object.assign(n,{isTouched:!0,isMoved:!1,allowTouchCallbacks:!0,isScrolling:void 0,startMoving:void 0}),o.startX=g,o.startY=v,n.touchStartTime=u(),t.allowClick=!0,t.updateSize(),t.swipeDirection=void 0,l.threshold>0&&(n.allowThresholdMove=!1),"touchstart"!==p.type){let e=!0;h.is(n.focusableElements)&&(e=!1),s.activeElement&&d(s.activeElement).is(n.focusableElements)&&s.activeElement!==h[0]&&s.activeElement.blur();const a=e&&t.allowTouchMove&&l.touchStartPreventDefault;!l.touchStartForcePreventDefault&&!a||h[0].isContentEditable||p.preventDefault()}t.emit("touchStart",p)}function M(e){const t=a(),s=this,i=s.touchEventsData,{params:r,touches:n,rtlTranslate:l,enabled:o}=s;if(!o)return;let c=e;if(c.originalEvent&&(c=c.originalEvent),!i.isTouched)return void(i.startMoving&&i.isScrolling&&s.emit("touchMoveOpposite",c));if(i.isTouchEvent&&"touchmove"!==c.type)return;const p="touchmove"===c.type&&c.targetTouches&&(c.targetTouches[0]||c.changedTouches[0]),h="touchmove"===c.type?p.pageX:c.pageX,m="touchmove"===c.type?p.pageY:c.pageY;if(c.preventedByNestedSwiper)return n.startX=h,void(n.startY=m);if(!s.allowTouchMove)return s.allowClick=!1,void(i.isTouched&&(Object.assign(n,{startX:h,startY:m,currentX:h,currentY:m}),i.touchStartTime=u()));if(i.isTouchEvent&&r.touchReleaseOnEdges&&!r.loop)if(s.isVertical()){if(mn.startY&&s.translate>=s.minTranslate())return i.isTouched=!1,void(i.isMoved=!1)}else if(hn.startX&&s.translate>=s.minTranslate())return;if(i.isTouchEvent&&t.activeElement&&c.target===t.activeElement&&d(c.target).is(i.focusableElements))return i.isMoved=!0,void(s.allowClick=!1);if(i.allowTouchCallbacks&&s.emit("touchMove",c),c.targetTouches&&c.targetTouches.length>1)return;n.currentX=h,n.currentY=m;const 
f=n.currentX-n.startX,g=n.currentY-n.startY;if(s.params.threshold&&Math.sqrt(f**2+g**2)=25&&(e=180*Math.atan2(Math.abs(g),Math.abs(f))/Math.PI,i.isScrolling=s.isHorizontal()?e>r.touchAngle:90-e>r.touchAngle)}if(i.isScrolling&&s.emit("touchMoveOpposite",c),void 0===i.startMoving&&(n.currentX===n.startX&&n.currentY===n.startY||(i.startMoving=!0)),i.isScrolling)return void(i.isTouched=!1);if(!i.startMoving)return;s.allowClick=!1,!r.cssMode&&c.cancelable&&c.preventDefault(),r.touchMoveStopPropagation&&!r.nested&&c.stopPropagation(),i.isMoved||(r.loop&&!r.cssMode&&s.loopFix(),i.startTranslate=s.getTranslate(),s.setTransition(0),s.animating&&s.$wrapperEl.trigger("webkitTransitionEnd transitionend"),i.allowMomentumBounce=!1,!r.grabCursor||!0!==s.allowSlideNext&&!0!==s.allowSlidePrev||s.setGrabCursor(!0),s.emit("sliderFirstMove",c)),s.emit("sliderMove",c),i.isMoved=!0;let v=s.isHorizontal()?f:g;n.diff=v,v*=r.touchRatio,l&&(v=-v),s.swipeDirection=v>0?"prev":"next",i.currentTranslate=v+i.startTranslate;let w=!0,b=r.resistanceRatio;if(r.touchReleaseOnEdges&&(b=0),v>0&&i.currentTranslate>s.minTranslate()?(w=!1,r.resistance&&(i.currentTranslate=s.minTranslate()-1+(-s.minTranslate()+i.startTranslate+v)**b)):v<0&&i.currentTranslatei.startTranslate&&(i.currentTranslate=i.startTranslate),s.allowSlidePrev||s.allowSlideNext||(i.currentTranslate=i.startTranslate),r.threshold>0){if(!(Math.abs(v)>r.threshold||i.allowThresholdMove))return void(i.currentTranslate=i.startTranslate);if(!i.allowThresholdMove)return i.allowThresholdMove=!0,n.startX=n.currentX,n.startY=n.currentY,i.currentTranslate=i.startTranslate,void(n.diff=s.isHorizontal()?n.currentX-n.startX:n.currentY-n.startY)}r.followFinger&&!r.cssMode&&((r.freeMode&&r.freeMode.enabled&&s.freeMode||r.watchSlidesProgress)&&(s.updateActiveIndex(),s.updateSlidesClasses()),s.params.freeMode&&r.freeMode.enabled&&s.freeMode&&s.freeMode.onTouchMove(),s.updateProgress(i.currentTranslate),s.setTranslate(i.currentTranslate))}function P(e){const 
t=this,s=t.touchEventsData,{params:a,touches:i,rtlTranslate:r,slidesGrid:n,enabled:l}=t;if(!l)return;let o=e;if(o.originalEvent&&(o=o.originalEvent),s.allowTouchCallbacks&&t.emit("touchEnd",o),s.allowTouchCallbacks=!1,!s.isTouched)return s.isMoved&&a.grabCursor&&t.setGrabCursor(!1),s.isMoved=!1,void(s.startMoving=!1);a.grabCursor&&s.isMoved&&s.isTouched&&(!0===t.allowSlideNext||!0===t.allowSlidePrev)&&t.setGrabCursor(!1);const d=u(),c=d-s.touchStartTime;if(t.allowClick&&(t.updateClickedSlide(o),t.emit("tap click",o),c<300&&d-s.lastClickTime<300&&t.emit("doubleTap doubleClick",o)),s.lastClickTime=u(),p((()=>{t.destroyed||(t.allowClick=!0)})),!s.isTouched||!s.isMoved||!t.swipeDirection||0===i.diff||s.currentTranslate===s.startTranslate)return s.isTouched=!1,s.isMoved=!1,void(s.startMoving=!1);let h;if(s.isTouched=!1,s.isMoved=!1,s.startMoving=!1,h=a.followFinger?r?t.translate:-t.translate:-s.currentTranslate,a.cssMode)return;if(t.params.freeMode&&a.freeMode.enabled)return void t.freeMode.onTouchEnd({currentPos:h});let m=0,f=t.slidesSizesGrid[0];for(let e=0;e=n[e]&&h=n[e]&&(m=e,f=n[n.length-1]-n[n.length-2])}const g=(h-n[m])/f,v=ma.longSwipesMs){if(!a.longSwipes)return void t.slideTo(t.activeIndex);"next"===t.swipeDirection&&(g>=a.longSwipesRatio?t.slideTo(m+v):t.slideTo(m)),"prev"===t.swipeDirection&&(g>1-a.longSwipesRatio?t.slideTo(m+v):t.slideTo(m))}else{if(!a.shortSwipes)return void t.slideTo(t.activeIndex);t.navigation&&(o.target===t.navigation.nextEl||o.target===t.navigation.prevEl)?o.target===t.navigation.nextEl?t.slideTo(m+v):t.slideTo(m):("next"===t.swipeDirection&&t.slideTo(m+v),"prev"===t.swipeDirection&&t.slideTo(m))}}function k(){const 
e=this,{params:t,el:s}=e;if(s&&0===s.offsetWidth)return;t.breakpoints&&e.setBreakpoint();const{allowSlideNext:a,allowSlidePrev:i,snapGrid:r}=e;e.allowSlideNext=!0,e.allowSlidePrev=!0,e.updateSize(),e.updateSlides(),e.updateSlidesClasses(),("auto"===t.slidesPerView||t.slidesPerView>1)&&e.isEnd&&!e.isBeginning&&!e.params.centeredSlides?e.slideTo(e.slides.length-1,0,!1,!0):e.slideTo(e.activeIndex,0,!1,!0),e.autoplay&&e.autoplay.running&&e.autoplay.paused&&e.autoplay.run(),e.allowSlidePrev=i,e.allowSlideNext=a,e.params.watchOverflow&&r!==e.snapGrid&&e.checkOverflow()}function z(e){const t=this;t.enabled&&(t.allowClick||(t.params.preventClicks&&e.preventDefault(),t.params.preventClicksPropagation&&t.animating&&(e.stopPropagation(),e.stopImmediatePropagation())))}function O(){const e=this,{wrapperEl:t,rtlTranslate:s,enabled:a}=e;if(!a)return;let i;e.previousTranslate=e.translate,e.isHorizontal()?e.translate=-t.scrollLeft:e.translate=-t.scrollTop,-0===e.translate&&(e.translate=0),e.updateActiveIndex(),e.updateSlidesClasses();const r=e.maxTranslate()-e.minTranslate();i=0===r?0:(e.translate-e.minTranslate())/r,i!==e.progress&&e.updateProgress(s?-e.translate:e.translate),e.emit("setTranslate",e.translate,!1)}let I=!1;function L(){}const A=(e,t)=>{const s=a(),{params:i,touchEvents:r,el:n,wrapperEl:l,device:o,support:d}=e,c=!!i.nested,p="on"===t?"addEventListener":"removeEventListener",u=t;if(d.touch){const t=!("touchstart"!==r.start||!d.passiveListener||!i.passiveListeners)&&{passive:!0,capture:!1};n[p](r.start,e.onTouchStart,t),n[p](r.move,e.onTouchMove,d.passiveListener?{passive:!1,capture:c}:c),n[p](r.end,e.onTouchEnd,t),r.cancel&&n[p](r.cancel,e.onTouchEnd,t)}else n[p](r.start,e.onTouchStart,!1),s[p](r.move,e.onTouchMove,c),s[p](r.end,e.onTouchEnd,!1);(i.preventClicks||i.preventClicksPropagation)&&n[p]("click",e.onClick,!0),i.cssMode&&l[p]("scroll",e.onScroll),i.updateOnWindowResize?e[u](o.ios||o.android?"resize orientationchange observerUpdate":"resize 
observerUpdate",k,!0):e[u]("observerUpdate",k,!0)};const D=(e,t)=>e.grid&&t.grid&&t.grid.rows>1;var G={init:!0,direction:"horizontal",touchEventsTarget:"wrapper",initialSlide:0,speed:300,cssMode:!1,updateOnWindowResize:!0,resizeObserver:!0,nested:!1,createElements:!1,enabled:!0,focusableElements:"input, select, option, textarea, button, video, label",width:null,height:null,preventInteractionOnTransition:!1,userAgent:null,url:null,edgeSwipeDetection:!1,edgeSwipeThreshold:20,autoHeight:!1,setWrapperSize:!1,virtualTranslate:!1,effect:"slide",breakpoints:void 0,breakpointsBase:"window",spaceBetween:0,slidesPerView:1,slidesPerGroup:1,slidesPerGroupSkip:0,slidesPerGroupAuto:!1,centeredSlides:!1,centeredSlidesBounds:!1,slidesOffsetBefore:0,slidesOffsetAfter:0,normalizeSlideIndex:!0,centerInsufficientSlides:!1,watchOverflow:!0,roundLengths:!1,touchRatio:1,touchAngle:45,simulateTouch:!0,shortSwipes:!0,longSwipes:!0,longSwipesRatio:.5,longSwipesMs:300,followFinger:!0,allowTouchMove:!0,threshold:0,touchMoveStopPropagation:!1,touchStartPreventDefault:!0,touchStartForcePreventDefault:!1,touchReleaseOnEdges:!1,uniqueNavElements:!0,resistance:!0,resistanceRatio:.85,watchSlidesProgress:!1,grabCursor:!1,preventClicks:!0,preventClicksPropagation:!0,slideToClickedSlide:!1,preloadImages:!0,updateOnImagesReady:!0,loop:!1,loopAdditionalSlides:0,loopedSlides:null,loopFillGroupWithBlank:!1,loopPreventsSlide:!0,allowSlidePrev:!0,allowSlideNext:!0,swipeHandler:null,noSwiping:!0,noSwipingClass:"swiper-no-swiping",noSwipingSelector:null,passiveListeners:!0,containerModifierClass:"swiper-",slideClass:"swiper-slide",slideBlankClass:"swiper-slide-invisible-blank",slideActiveClass:"swiper-slide-active",slideDuplicateActiveClass:"swiper-slide-duplicate-active",slideVisibleClass:"swiper-slide-visible",slideDuplicateClass:"swiper-slide-duplicate",slideNextClass:"swiper-slide-next",slideDuplicateNextClass:"swiper-slide-duplicate-next",slidePrevClass:"swiper-slide-prev",slideDuplicatePrevClass:"swiper-
slide-duplicate-prev",wrapperClass:"swiper-wrapper",runCallbacksOnInit:!0,_emitClasses:!1};function N(e,t){return function(s={}){const a=Object.keys(s)[0],i=s[a];"object"==typeof i&&null!==i?(["navigation","pagination","scrollbar"].indexOf(a)>=0&&!0===e[a]&&(e[a]={auto:!0}),a in e&&"enabled"in i?(!0===e[a]&&(e[a]={enabled:!0}),"object"!=typeof e[a]||"enabled"in e[a]||(e[a].enabled=!0),e[a]||(e[a]={enabled:!1}),f(t,s)):f(t,s)):f(t,s)}}const B={eventsEmitter:C,update:{updateSize:function(){const e=this;let t,s;const a=e.$el;t=void 0!==e.params.width&&null!==e.params.width?e.params.width:a[0].clientWidth,s=void 0!==e.params.height&&null!==e.params.height?e.params.height:a[0].clientHeight,0===t&&e.isHorizontal()||0===s&&e.isVertical()||(t=t-parseInt(a.css("padding-left")||0,10)-parseInt(a.css("padding-right")||0,10),s=s-parseInt(a.css("padding-top")||0,10)-parseInt(a.css("padding-bottom")||0,10),Number.isNaN(t)&&(t=0),Number.isNaN(s)&&(s=0),Object.assign(e,{width:t,height:s,size:e.isHorizontal()?t:s}))},updateSlides:function(){const e=this;function t(t){return e.isHorizontal()?t:{width:"height","margin-top":"margin-left","margin-bottom ":"margin-right","margin-left":"margin-top","margin-right":"margin-bottom","padding-left":"padding-top","padding-right":"padding-bottom",marginRight:"marginBottom"}[t]}function s(e,s){return parseFloat(e.getPropertyValue(t(s))||0)}const a=e.params,{$wrapperEl:i,size:r,rtlTranslate:n,wrongRTL:l}=e,o=e.virtual&&a.virtual.enabled,d=o?e.virtual.slides.length:e.slides.length,c=i.children(`.${e.params.slideClass}`),p=o?e.virtual.slides.length:c.length;let u=[];const h=[],m=[];let f=a.slidesOffsetBefore;"function"==typeof f&&(f=a.slidesOffsetBefore.call(e));let v=a.slidesOffsetAfter;"function"==typeof v&&(v=a.slidesOffsetAfter.call(e));const w=e.snapGrid.length,b=e.slidesGrid.length;let x=a.spaceBetween,y=-f,E=0,T=0;if(void 0===r)return;"string"==typeof 
x&&x.indexOf("%")>=0&&(x=parseFloat(x.replace("%",""))/100*r),e.virtualSize=-x,n?c.css({marginLeft:"",marginBottom:"",marginTop:""}):c.css({marginRight:"",marginBottom:"",marginTop:""}),a.centeredSlides&&a.cssMode&&(g(e.wrapperEl,"--swiper-centered-offset-before",""),g(e.wrapperEl,"--swiper-centered-offset-after",""));const C=a.grid&&a.grid.rows>1&&e.grid;let $;C&&e.grid.initSlides(p);const S="auto"===a.slidesPerView&&a.breakpoints&&Object.keys(a.breakpoints).filter((e=>void 0!==a.breakpoints[e].slidesPerView)).length>0;for(let i=0;i1&&u.push(e.virtualSize-r)}if(0===u.length&&(u=[0]),0!==a.spaceBetween){const s=e.isHorizontal()&&n?"marginLeft":t("marginRight");c.filter(((e,t)=>!a.cssMode||t!==c.length-1)).css({[s]:`${x}px`})}if(a.centeredSlides&&a.centeredSlidesBounds){let e=0;m.forEach((t=>{e+=t+(a.spaceBetween?a.spaceBetween:0)})),e-=a.spaceBetween;const t=e-r;u=u.map((e=>e<0?-f:e>t?t+v:e))}if(a.centerInsufficientSlides){let e=0;if(m.forEach((t=>{e+=t+(a.spaceBetween?a.spaceBetween:0)})),e-=a.spaceBetween,e{u[s]=e-t})),h.forEach(((e,s)=>{h[s]=e+t}))}}if(Object.assign(e,{slides:c,snapGrid:u,slidesGrid:h,slidesSizesGrid:m}),a.centeredSlides&&a.cssMode&&!a.centeredSlidesBounds){g(e.wrapperEl,"--swiper-centered-offset-before",-u[0]+"px"),g(e.wrapperEl,"--swiper-centered-offset-after",e.size/2-m[m.length-1]/2+"px");const t=-e.snapGrid[0],s=-e.slidesGrid[0];e.snapGrid=e.snapGrid.map((e=>e+t)),e.slidesGrid=e.slidesGrid.map((e=>e+s))}p!==d&&e.emit("slidesLengthChange"),u.length!==w&&(e.params.watchOverflow&&e.checkOverflow(),e.emit("snapGridLengthChange")),h.length!==b&&e.emit("slidesGridLengthChange"),a.watchSlidesProgress&&e.updateSlidesOffset()},updateAutoHeight:function(e){const t=this,s=[],a=t.virtual&&t.params.virtual.enabled;let i,r=0;"number"==typeof e?t.setTransition(e):!0===e&&t.setTransition(t.params.speed);const 
n=e=>a?t.slides.filter((t=>parseInt(t.getAttribute("data-swiper-slide-index"),10)===e))[0]:t.slides.eq(e)[0];if("auto"!==t.params.slidesPerView&&t.params.slidesPerView>1)if(t.params.centeredSlides)t.visibleSlides.each((e=>{s.push(e)}));else for(i=0;it.slides.length&&!a)break;s.push(n(e))}else s.push(n(t.activeIndex));for(i=0;ir?e:r}r&&t.$wrapperEl.css("height",`${r}px`)},updateSlidesOffset:function(){const e=this,t=e.slides;for(let s=0;s=0&&p1&&u<=t.size||p<=0&&u>=t.size)&&(t.visibleSlides.push(l),t.visibleSlidesIndexes.push(e),a.eq(e).addClass(s.slideVisibleClass)),l.progress=i?-d:d,l.originalProgress=i?-c:c}t.visibleSlides=d(t.visibleSlides)},updateProgress:function(e){const t=this;if(void 0===e){const s=t.rtlTranslate?-1:1;e=t&&t.translate&&t.translate*s||0}const s=t.params,a=t.maxTranslate()-t.minTranslate();let{progress:i,isBeginning:r,isEnd:n}=t;const l=r,o=n;0===a?(i=0,r=!0,n=!0):(i=(e-t.minTranslate())/a,r=i<=0,n=i>=1),Object.assign(t,{progress:i,isBeginning:r,isEnd:n}),(s.watchSlidesProgress||s.centeredSlides&&s.autoHeight)&&t.updateSlidesProgress(e),r&&!l&&t.emit("reachBeginning toEdge"),n&&!o&&t.emit("reachEnd toEdge"),(l&&!r||o&&!n)&&t.emit("fromEdge"),t.emit("progress",i)},updateSlidesClasses:function(){const e=this,{slides:t,params:s,$wrapperEl:a,activeIndex:i,realIndex:r}=e,n=e.virtual&&s.virtual.enabled;let l;t.removeClass(`${s.slideActiveClass} ${s.slideNextClass} ${s.slidePrevClass} ${s.slideDuplicateActiveClass} ${s.slideDuplicateNextClass} ${s.slideDuplicatePrevClass}`),l=n?e.$wrapperEl.find(`.${s.slideClass}[data-swiper-slide-index="${i}"]`):t.eq(i),l.addClass(s.slideActiveClass),s.loop&&(l.hasClass(s.slideDuplicateClass)?a.children(`.${s.slideClass}:not(.${s.slideDuplicateClass})[data-swiper-slide-index="${r}"]`).addClass(s.slideDuplicateActiveClass):a.children(`.${s.slideClass}.${s.slideDuplicateClass}[data-swiper-slide-index="${r}"]`).addClass(s.slideDuplicateActiveClass));let 
o=l.nextAll(`.${s.slideClass}`).eq(0).addClass(s.slideNextClass);s.loop&&0===o.length&&(o=t.eq(0),o.addClass(s.slideNextClass));let d=l.prevAll(`.${s.slideClass}`).eq(0).addClass(s.slidePrevClass);s.loop&&0===d.length&&(d=t.eq(-1),d.addClass(s.slidePrevClass)),s.loop&&(o.hasClass(s.slideDuplicateClass)?a.children(`.${s.slideClass}:not(.${s.slideDuplicateClass})[data-swiper-slide-index="${o.attr("data-swiper-slide-index")}"]`).addClass(s.slideDuplicateNextClass):a.children(`.${s.slideClass}.${s.slideDuplicateClass}[data-swiper-slide-index="${o.attr("data-swiper-slide-index")}"]`).addClass(s.slideDuplicateNextClass),d.hasClass(s.slideDuplicateClass)?a.children(`.${s.slideClass}:not(.${s.slideDuplicateClass})[data-swiper-slide-index="${d.attr("data-swiper-slide-index")}"]`).addClass(s.slideDuplicatePrevClass):a.children(`.${s.slideClass}.${s.slideDuplicateClass}[data-swiper-slide-index="${d.attr("data-swiper-slide-index")}"]`).addClass(s.slideDuplicatePrevClass)),e.emitSlidesClasses()},updateActiveIndex:function(e){const t=this,s=t.rtlTranslate?t.translate:-t.translate,{slidesGrid:a,snapGrid:i,params:r,activeIndex:n,realIndex:l,snapIndex:o}=t;let d,c=e;if(void 0===c){for(let e=0;e=a[e]&&s=a[e]&&s=a[e]&&(c=e);r.normalizeSlideIndex&&(c<0||void 0===c)&&(c=0)}if(i.indexOf(s)>=0)d=i.indexOf(s);else{const e=Math.min(r.slidesPerGroupSkip,c);d=e+Math.floor((c-e)/r.slidesPerGroup)}if(d>=i.length&&(d=i.length-1),c===n)return void(d!==o&&(t.snapIndex=d,t.emit("snapIndexChange")));const p=parseInt(t.slides.eq(c).attr("data-swiper-slide-index")||c,10);Object.assign(t,{snapIndex:d,realIndex:p,previousIndex:n,activeIndex:c}),t.emit("activeIndexChange"),t.emit("snapIndexChange"),l!==p&&t.emit("realIndexChange"),(t.initialized||t.params.runCallbacksOnInit)&&t.emit("slideChange")},updateClickedSlide:function(e){const t=this,s=t.params,a=d(e.target).closest(`.${s.slideClass}`)[0];let i,r=!1;if(a)for(let 
e=0;eo?o:a&&e=o.length&&(g=o.length-1),(p||l.initialSlide||0)===(c||0)&&s&&r.emit("beforeSlideChangeStart");const w=-o[g];if(r.updateProgress(w),l.normalizeSlideIndex)for(let e=0;e=s&&t=s&&t=s&&(n=e)}if(r.initialized&&n!==p){if(!r.allowSlideNext&&wr.translate&&w>r.maxTranslate()&&(p||0)!==n)return!1}let b;if(b=n>p?"next":n{r.wrapperEl.style.scrollSnapType="",r._swiperImmediateVirtual=!1}))}else{if(!r.support.smoothScroll)return v({swiper:r,targetPosition:s,side:e?"left":"top"}),!0;h.scrollTo({[e?"left":"top"]:s,behavior:"smooth"})}return!0}return 0===t?(r.setTransition(0),r.setTranslate(w),r.updateActiveIndex(n),r.updateSlidesClasses(),r.emit("beforeTransitionStart",t,a),r.transitionStart(s,b),r.transitionEnd(s,b)):(r.setTransition(t),r.setTranslate(w),r.updateActiveIndex(n),r.updateSlidesClasses(),r.emit("beforeTransitionStart",t,a),r.transitionStart(s,b),r.animating||(r.animating=!0,r.onSlideToWrapperTransitionEnd||(r.onSlideToWrapperTransitionEnd=function(e){r&&!r.destroyed&&e.target===this&&(r.$wrapperEl[0].removeEventListener("transitionend",r.onSlideToWrapperTransitionEnd),r.$wrapperEl[0].removeEventListener("webkitTransitionEnd",r.onSlideToWrapperTransitionEnd),r.onSlideToWrapperTransitionEnd=null,delete r.onSlideToWrapperTransitionEnd,r.transitionEnd(s,b))}),r.$wrapperEl[0].addEventListener("transitionend",r.onSlideToWrapperTransitionEnd),r.$wrapperEl[0].addEventListener("webkitTransitionEnd",r.onSlideToWrapperTransitionEnd))),!0},slideToLoop:function(e=0,t=this.params.speed,s=!0,a){const i=this;let r=e;return i.params.loop&&(r+=i.loopedSlides),i.slideTo(r,t,s,a)},slideNext:function(e=this.params.speed,t=!0,s){const a=this,{animating:i,enabled:r,params:n}=a;if(!r)return a;let l=n.slidesPerGroup;"auto"===n.slidesPerView&&1===n.slidesPerGroup&&n.slidesPerGroupAuto&&(l=Math.max(a.slidesPerViewDynamic("current",!0),1));const o=a.activeIndexc(e)));let h=n[u.indexOf(p)-1];if(void 0===h&&i.cssMode){let e;n.forEach(((t,s)=>{p>=t&&(e=s)})),void 
0!==e&&(h=n[e>0?e-1:e])}let m=0;return void 0!==h&&(m=l.indexOf(h),m<0&&(m=a.activeIndex-1),"auto"===i.slidesPerView&&1===i.slidesPerGroup&&i.slidesPerGroupAuto&&(m=m-a.slidesPerViewDynamic("previous",!0)+1,m=Math.max(m,0))),a.slideTo(m,e,t,s)},slideReset:function(e=this.params.speed,t=!0,s){return this.slideTo(this.activeIndex,e,t,s)},slideToClosest:function(e=this.params.speed,t=!0,s,a=.5){const i=this;let r=i.activeIndex;const n=Math.min(i.params.slidesPerGroupSkip,r),l=n+Math.floor((r-n)/i.params.slidesPerGroup),o=i.rtlTranslate?i.translate:-i.translate;if(o>=i.snapGrid[l]){const e=i.snapGrid[l];o-e>(i.snapGrid[l+1]-e)*a&&(r+=i.params.slidesPerGroup)}else{const e=i.snapGrid[l-1];o-e<=(i.snapGrid[l]-e)*a&&(r-=i.params.slidesPerGroup)}return r=Math.max(r,0),r=Math.min(r,i.slidesGrid.length-1),i.slideTo(r,e,t,s)},slideToClickedSlide:function(){const e=this,{params:t,$wrapperEl:s}=e,a="auto"===t.slidesPerView?e.slidesPerViewDynamic():t.slidesPerView;let i,r=e.clickedIndex;if(t.loop){if(e.animating)return;i=parseInt(d(e.clickedSlide).attr("data-swiper-slide-index"),10),t.centeredSlides?re.slides.length-e.loopedSlides+a/2?(e.loopFix(),r=s.children(`.${t.slideClass}[data-swiper-slide-index="${i}"]:not(.${t.slideDuplicateClass})`).eq(0).index(),p((()=>{e.slideTo(r)}))):e.slideTo(r):r>e.slides.length-a?(e.loopFix(),r=s.children(`.${t.slideClass}[data-swiper-slide-index="${i}"]:not(.${t.slideDuplicateClass})`).eq(0).index(),p((()=>{e.slideTo(r)}))):e.slideTo(r)}else e.slideTo(r)}},loop:{loopCreate:function(){const e=this,t=a(),{params:s,$wrapperEl:i}=e,r=d(i.children()[0].parentNode);r.children(`.${s.slideClass}.${s.slideDuplicateClass}`).remove();let n=r.children(`.${s.slideClass}`);if(s.loopFillGroupWithBlank){const e=s.slidesPerGroup-n.length%s.slidesPerGroup;if(e!==s.slidesPerGroup){for(let a=0;an.length&&(e.loopedSlides=n.length);const l=[],o=[];n.each(((t,s)=>{const a=d(t);s=n.length-e.loopedSlides&&l.push(t),a.attr("data-swiper-slide-index",s)}));for(let 
e=0;e=0;e-=1)r.prepend(d(l[e].cloneNode(!0)).addClass(s.slideDuplicateClass))},loopFix:function(){const e=this;e.emit("beforeLoopFix");const{activeIndex:t,slides:s,loopedSlides:a,allowSlidePrev:i,allowSlideNext:r,snapGrid:n,rtlTranslate:l}=e;let o;e.allowSlidePrev=!0,e.allowSlideNext=!0;const d=-n[t]-e.getTranslate();if(t=s.length-a){o=-s.length+t+a,o+=a;e.slideTo(o,0,!1,!0)&&0!==d&&e.setTranslate((l?-e.translate:e.translate)-d)}e.allowSlidePrev=i,e.allowSlideNext=r,e.emit("loopFix")},loopDestroy:function(){const{$wrapperEl:e,params:t,slides:s}=this;e.children(`.${t.slideClass}.${t.slideDuplicateClass},.${t.slideClass}.${t.slideBlankClass}`).remove(),s.removeAttr("data-swiper-slide-index")}},grabCursor:{setGrabCursor:function(e){const t=this;if(t.support.touch||!t.params.simulateTouch||t.params.watchOverflow&&t.isLocked||t.params.cssMode)return;const s="container"===t.params.touchEventsTarget?t.el:t.wrapperEl;s.style.cursor="move",s.style.cursor=e?"-webkit-grabbing":"-webkit-grab",s.style.cursor=e?"-moz-grabbin":"-moz-grab",s.style.cursor=e?"grabbing":"grab"},unsetGrabCursor:function(){const e=this;e.support.touch||e.params.watchOverflow&&e.isLocked||e.params.cssMode||(e["container"===e.params.touchEventsTarget?"el":"wrapperEl"].style.cursor="")}},events:{attachEvents:function(){const e=this,t=a(),{params:s,support:i}=e;e.onTouchStart=S.bind(e),e.onTouchMove=M.bind(e),e.onTouchEnd=P.bind(e),s.cssMode&&(e.onScroll=O.bind(e)),e.onClick=z.bind(e),i.touch&&!I&&(t.addEventListener("touchstart",L),I=!0),A(e,"on")},detachEvents:function(){A(this,"off")}},breakpoints:{setBreakpoint:function(){const e=this,{activeIndex:t,initialized:s,loopedSlides:a=0,params:i,$el:r}=e,n=i.breakpoints;if(!n||n&&0===Object.keys(n).length)return;const l=e.getBreakpoint(n,e.params.breakpointsBase,e.el);if(!l||e.currentBreakpoint===l)return;const o=(l in n?n[l]:void 0)||e.originalParams,d=D(e,i),c=D(e,o),p=i.enabled;d&&!c?(r.removeClass(`${i.containerModifierClass}grid 
${i.containerModifierClass}grid-column`),e.emitContainerClasses()):!d&&c&&(r.addClass(`${i.containerModifierClass}grid`),(o.grid.fill&&"column"===o.grid.fill||!o.grid.fill&&"column"===i.grid.fill)&&r.addClass(`${i.containerModifierClass}grid-column`),e.emitContainerClasses());const u=o.direction&&o.direction!==i.direction,h=i.loop&&(o.slidesPerView!==i.slidesPerView||u);u&&s&&e.changeDirection(),f(e.params,o);const m=e.params.enabled;Object.assign(e,{allowTouchMove:e.params.allowTouchMove,allowSlideNext:e.params.allowSlideNext,allowSlidePrev:e.params.allowSlidePrev}),p&&!m?e.disable():!p&&m&&e.enable(),e.currentBreakpoint=l,e.emit("_beforeBreakpoint",o),h&&s&&(e.loopDestroy(),e.loopCreate(),e.updateSlides(),e.slideTo(t-a+e.loopedSlides,0,!1)),e.emit("breakpoint",o)},getBreakpoint:function(e,t="window",s){if(!e||"container"===t&&!s)return;let a=!1;const i=r(),n="window"===t?i.innerHeight:s.clientHeight,l=Object.keys(e).map((e=>{if("string"==typeof e&&0===e.indexOf("@")){const t=parseFloat(e.substr(1));return{value:n*t,point:e}}return{value:e,point:e}}));l.sort(((e,t)=>parseInt(e.value,10)-parseInt(t.value,10)));for(let e=0;es}else e.isLocked=1===e.snapGrid.length;!0===s.allowSlideNext&&(e.allowSlideNext=!e.isLocked),!0===s.allowSlidePrev&&(e.allowSlidePrev=!e.isLocked),t&&t!==e.isLocked&&(e.isEnd=!1),t!==e.isLocked&&e.emit(e.isLocked?"lock":"unlock")}},classes:{addClasses:function(){const e=this,{classNames:t,params:s,rtl:a,$el:i,device:r,support:n}=e,l=function(e,t){const s=[];return e.forEach((e=>{"object"==typeof e?Object.keys(e).forEach((a=>{e[a]&&s.push(t+a)})):"string"==typeof 
e&&s.push(t+e)})),s}(["initialized",s.direction,{"pointer-events":!n.touch},{"free-mode":e.params.freeMode&&s.freeMode.enabled},{autoheight:s.autoHeight},{rtl:a},{grid:s.grid&&s.grid.rows>1},{"grid-column":s.grid&&s.grid.rows>1&&"column"===s.grid.fill},{android:r.android},{ios:r.ios},{"css-mode":s.cssMode},{centered:s.cssMode&&s.centeredSlides}],s.containerModifierClass);t.push(...l),i.addClass([...t].join(" ")),e.emitContainerClasses()},removeClasses:function(){const{$el:e,classNames:t}=this;e.removeClass(t.join(" ")),this.emitContainerClasses()}},images:{loadImage:function(e,t,s,a,i,n){const l=r();let o;function c(){n&&n()}d(e).parent("picture")[0]||e.complete&&i?c():t?(o=new l.Image,o.onload=c,o.onerror=c,a&&(o.sizes=a),s&&(o.srcset=s),t&&(o.src=t)):c()},preloadImages:function(){const e=this;function t(){null!=e&&e&&!e.destroyed&&(void 0!==e.imagesLoaded&&(e.imagesLoaded+=1),e.imagesLoaded===e.imagesToLoad.length&&(e.params.updateOnImagesReady&&e.update(),e.emit("imagesReady")))}e.imagesToLoad=e.$el.find("img");for(let s=0;s1){const e=[];return d(s.el).each((t=>{const a=f({},s,{el:t});e.push(new H(a))})),e}const a=this;a.__swiper__=!0,a.support=y(),a.device=E({userAgent:s.userAgent}),a.browser=T(),a.eventsListeners={},a.eventsAnyListeners=[],a.modules=[...a.__modules__],s.modules&&Array.isArray(s.modules)&&a.modules.push(...s.modules);const i={};a.modules.forEach((e=>{e({swiper:a,extendParams:N(s,i),on:a.on.bind(a),once:a.once.bind(a),off:a.off.bind(a),emit:a.emit.bind(a)})}));const r=f({},G,i);return 
a.params=f({},r,X,s),a.originalParams=f({},a.params),a.passedParams=f({},s),a.params&&a.params.on&&Object.keys(a.params.on).forEach((e=>{a.on(e,a.params.on[e])})),a.params&&a.params.onAny&&a.onAny(a.params.onAny),a.$=d,Object.assign(a,{enabled:a.params.enabled,el:t,classNames:[],slides:d(),slidesGrid:[],snapGrid:[],slidesSizesGrid:[],isHorizontal:()=>"horizontal"===a.params.direction,isVertical:()=>"vertical"===a.params.direction,activeIndex:0,realIndex:0,isBeginning:!0,isEnd:!1,translate:0,previousTranslate:0,progress:0,velocity:0,animating:!1,allowSlideNext:a.params.allowSlideNext,allowSlidePrev:a.params.allowSlidePrev,touchEvents:function(){const e=["touchstart","touchmove","touchend","touchcancel"],t=["pointerdown","pointermove","pointerup"];return a.touchEventsTouch={start:e[0],move:e[1],end:e[2],cancel:e[3]},a.touchEventsDesktop={start:t[0],move:t[1],end:t[2]},a.support.touch||!a.params.simulateTouch?a.touchEventsTouch:a.touchEventsDesktop}(),touchEventsData:{isTouched:void 0,isMoved:void 0,allowTouchCallbacks:void 0,touchStartTime:void 0,isScrolling:void 0,currentTranslate:void 0,startTranslate:void 0,allowThresholdMove:void 0,focusableElements:a.params.focusableElements,lastClickTime:u(),clickTimeout:void 0,velocities:[],allowMomentumBounce:void 0,isTouchEvent:void 0,startMoving:void 0},allowClick:!0,allowTouchMove:a.params.allowTouchMove,touches:{startX:0,startY:0,currentX:0,currentY:0,diff:0},imagesToLoad:[],imagesLoaded:0}),a.emit("_swiper"),a.params.init&&a.init(),a}enable(){const e=this;e.enabled||(e.enabled=!0,e.params.grabCursor&&e.setGrabCursor(),e.emit("enable"))}disable(){const e=this;e.enabled&&(e.enabled=!1,e.params.grabCursor&&e.unsetGrabCursor(),e.emit("disable"))}setProgress(e,t){const s=this;e=Math.min(Math.max(e,0),1);const a=s.minTranslate(),i=(s.maxTranslate()-a)*e+a;s.translateTo(i,void 0===t?0:t),s.updateActiveIndex(),s.updateSlidesClasses()}emitContainerClasses(){const e=this;if(!e.params._emitClasses||!e.el)return;const 
t=e.el.className.split(" ").filter((t=>0===t.indexOf("swiper")||0===t.indexOf(e.params.containerModifierClass)));e.emit("_containerClasses",t.join(" "))}getSlideClasses(e){const t=this;return e.className.split(" ").filter((e=>0===e.indexOf("swiper-slide")||0===e.indexOf(t.params.slideClass))).join(" ")}emitSlidesClasses(){const e=this;if(!e.params._emitClasses||!e.el)return;const t=[];e.slides.each((s=>{const a=e.getSlideClasses(s);t.push({slideEl:s,classNames:a}),e.emit("_slideClass",s,a)})),e.emit("_slideClasses",t)}slidesPerViewDynamic(e="current",t=!1){const{params:s,slides:a,slidesGrid:i,slidesSizesGrid:r,size:n,activeIndex:l}=this;let o=1;if(s.centeredSlides){let e,t=a[l].swiperSlideSize;for(let s=l+1;sn&&(e=!0));for(let s=l-1;s>=0;s-=1)a[s]&&!e&&(t+=a[s].swiperSlideSize,o+=1,t>n&&(e=!0))}else if("current"===e)for(let e=l+1;e=0;e-=1){i[l]-i[e]1)&&e.isEnd&&!e.params.centeredSlides?e.slideTo(e.slides.length-1,0,!1,!0):e.slideTo(e.activeIndex,0,!1,!0),i||a()),s.watchOverflow&&t!==e.snapGrid&&e.checkOverflow(),e.emit("update")}changeDirection(e,t=!0){const s=this,a=s.params.direction;return e||(e="horizontal"===a?"vertical":"horizontal"),e===a||"horizontal"!==e&&"vertical"!==e||(s.$el.removeClass(`${s.params.containerModifierClass}${a}`).addClass(`${s.params.containerModifierClass}${e}`),s.emitContainerClasses(),s.params.direction=e,s.slides.each((t=>{"vertical"===e?t.style.width="":t.style.height=""})),s.emit("changeDirection"),t&&s.update()),s}mount(e){const t=this;if(t.mounted)return!0;const s=d(e||t.params.el);if(!(e=s[0]))return!1;e.swiper=t;const i=()=>`.${(t.params.wrapperClass||"").trim().split(" ").join(".")}`;let r=(()=>{if(e&&e.shadowRoot&&e.shadowRoot.querySelector){const t=d(e.shadowRoot.querySelector(i()));return t.children=e=>s.children(e),t}return s.children(i())})();if(0===r.length&&t.params.createElements){const 
e=a().createElement("div");r=d(e),e.className=t.params.wrapperClass,s.append(e),s.children(`.${t.params.slideClass}`).each((e=>{r.append(e)}))}return Object.assign(t,{$el:s,el:e,$wrapperEl:r,wrapperEl:r[0],mounted:!0,rtl:"rtl"===e.dir.toLowerCase()||"rtl"===s.css("direction"),rtlTranslate:"horizontal"===t.params.direction&&("rtl"===e.dir.toLowerCase()||"rtl"===s.css("direction")),wrongRTL:"-webkit-box"===r.css("display")}),!0}init(e){const t=this;if(t.initialized)return t;return!1===t.mount(e)||(t.emit("beforeInit"),t.params.breakpoints&&t.setBreakpoint(),t.addClasses(),t.params.loop&&t.loopCreate(),t.updateSize(),t.updateSlides(),t.params.watchOverflow&&t.checkOverflow(),t.params.grabCursor&&t.enabled&&t.setGrabCursor(),t.params.preloadImages&&t.preloadImages(),t.params.loop?t.slideTo(t.params.initialSlide+t.loopedSlides,0,t.params.runCallbacksOnInit,!1,!0):t.slideTo(t.params.initialSlide,0,t.params.runCallbacksOnInit,!1,!0),t.attachEvents(),t.initialized=!0,t.emit("init"),t.emit("afterInit")),t}destroy(e=!0,t=!0){const s=this,{params:a,$el:i,$wrapperEl:r,slides:n}=s;return void 0===s.params||s.destroyed||(s.emit("beforeDestroy"),s.initialized=!1,s.detachEvents(),a.loop&&s.loopDestroy(),t&&(s.removeClasses(),i.removeAttr("style"),r.removeAttr("style"),n&&n.length&&n.removeClass([a.slideVisibleClass,a.slideActiveClass,a.slideNextClass,a.slidePrevClass].join(" ")).removeAttr("style").removeAttr("data-swiper-slide-index")),s.emit("destroy"),Object.keys(s.eventsListeners).forEach((e=>{s.off(e)})),!1!==e&&(s.$el[0].swiper=null,function(e){const t=e;Object.keys(t).forEach((e=>{try{t[e]=null}catch(e){}try{delete t[e]}catch(e){}}))}(s)),s.destroyed=!0),null}static extendDefaults(e){f(X,e)}static get extendedDefaults(){return X}static get defaults(){return G}static installModule(e){H.prototype.__modules__||(H.prototype.__modules__=[]);const t=H.prototype.__modules__;"function"==typeof e&&t.indexOf(e)<0&&t.push(e)}static use(e){return 
Array.isArray(e)?(e.forEach((e=>H.installModule(e))),H):(H.installModule(e),H)}}function Y(e,t,s,i){const r=a();return e.params.createElements&&Object.keys(i).forEach((a=>{if(!s[a]&&!0===s.auto){let n=e.$el.children(`.${i[a]}`)[0];n||(n=r.createElement("div"),n.className=i[a],e.$el.append(n)),s[a]=n,t[a]=n}})),s}function W(e=""){return`.${e.trim().replace(/([\.:!\/])/g,"\\$1").replace(/ /g,".")}`}function R(e){const t=this,{$wrapperEl:s,params:a}=t;if(a.loop&&t.loopDestroy(),"object"==typeof e&&"length"in e)for(let t=0;t=l)return void s.appendSlide(t);let o=n>e?n+1:n;const d=[];for(let t=l-1;t>=e;t-=1){const e=s.slides.eq(t);e.remove(),d.unshift(e)}if("object"==typeof t&&"length"in t){for(let e=0;ee?n+t.length:n}else a.append(t);for(let e=0;e{if(s.params.effect!==t)return;s.classNames.push(`${s.params.containerModifierClass}${t}`),l&&l()&&s.classNames.push(`${s.params.containerModifierClass}3d`);const e=n?n():{};Object.assign(s.params,e),Object.assign(s.originalParams,e)})),a("setTranslate",(()=>{s.params.effect===t&&i()})),a("setTransition",((e,a)=>{s.params.effect===t&&r(a)}))}function U(e,t){return e.transformEl?t.find(e.transformEl).css({"backface-visibility":"hidden","-webkit-backface-visibility":"hidden"}):t}function K({swiper:e,duration:t,transformEl:s,allSlides:a}){const{slides:i,activeIndex:r,$wrapperEl:n}=e;if(e.params.virtualTranslate&&0!==t){let t,l=!1;t=a?s?i.find(s):i:s?i.eq(r).find(s):i.eq(r),t.transitionEnd((()=>{if(l)return;if(!e||e.destroyed)return;l=!0,e.animating=!1;const t=["webkitTransitionEnd","transitionend"];for(let e=0;e`),i.append(r)),r}Object.keys(B).forEach((e=>{Object.keys(B[e]).forEach((t=>{H.prototype[t]=B[e][t]}))})),H.use([function({swiper:e,on:t,emit:s}){const a=r();let i=null;const n=()=>{e&&!e.destroyed&&e.initialized&&(s("beforeResize"),s("resize"))},l=()=>{e&&!e.destroyed&&e.initialized&&s("orientationchange")};t("init",(()=>{e.params.resizeObserver&&void 0!==a.ResizeObserver?e&&!e.destroyed&&e.initialized&&(i=new 
ResizeObserver((t=>{const{width:s,height:a}=e;let i=s,r=a;t.forEach((({contentBoxSize:t,contentRect:s,target:a})=>{a&&a!==e.el||(i=s?s.width:(t[0]||t).inlineSize,r=s?s.height:(t[0]||t).blockSize)})),i===s&&r===a||n()})),i.observe(e.el)):(a.addEventListener("resize",n),a.addEventListener("orientationchange",l))})),t("destroy",(()=>{i&&i.unobserve&&e.el&&(i.unobserve(e.el),i=null),a.removeEventListener("resize",n),a.removeEventListener("orientationchange",l)}))},function({swiper:e,extendParams:t,on:s,emit:a}){const i=[],n=r(),l=(e,t={})=>{const s=new(n.MutationObserver||n.WebkitMutationObserver)((e=>{if(1===e.length)return void a("observerUpdate",e[0]);const t=function(){a("observerUpdate",e[0])};n.requestAnimationFrame?n.requestAnimationFrame(t):n.setTimeout(t,0)}));s.observe(e,{attributes:void 0===t.attributes||t.attributes,childList:void 0===t.childList||t.childList,characterData:void 0===t.characterData||t.characterData}),i.push(s)};t({observer:!1,observeParents:!1,observeSlideChildren:!1}),s("init",(()=>{if(e.params.observer){if(e.params.observeParents){const t=e.$el.parents();for(let e=0;e{i.forEach((e=>{e.disconnect()})),i.splice(0,i.length)}))}]);const J=[function({swiper:e,extendParams:t,on:s}){let a;function i(t,s){const a=e.params.virtual;if(a.cache&&e.virtual.cache[s])return e.virtual.cache[s];const i=a.renderSlide?d(a.renderSlide.call(e,t,s)):d(`
    ${t}
    `);return i.attr("data-swiper-slide-index")||i.attr("data-swiper-slide-index",s),a.cache&&(e.virtual.cache[s]=i),i}function r(t){const{slidesPerView:s,slidesPerGroup:a,centeredSlides:r}=e.params,{addSlidesBefore:n,addSlidesAfter:l}=e.params.virtual,{from:o,to:d,slides:c,slidesGrid:p,offset:u}=e.virtual;e.params.cssMode||e.updateActiveIndex();const h=e.activeIndex||0;let m,f,g;m=e.rtlTranslate?"right":e.isHorizontal()?"left":"top",r?(f=Math.floor(s/2)+a+l,g=Math.floor(s/2)+a+n):(f=s+(a-1)+l,g=a+n);const v=Math.max((h||0)-g,0),w=Math.min((h||0)+f,c.length-1),b=(e.slidesGrid[v]||0)-(e.slidesGrid[0]||0);function x(){e.updateSlides(),e.updateProgress(),e.updateSlidesClasses(),e.lazy&&e.params.lazy.enabled&&e.lazy.load()}if(Object.assign(e.virtual,{from:v,to:w,offset:b,slidesGrid:e.slidesGrid}),o===v&&d===w&&!t)return e.slidesGrid!==p&&b!==u&&e.slides.css(m,`${b}px`),void e.updateProgress();if(e.params.virtual.renderExternal)return e.params.virtual.renderExternal.call(e,{offset:b,from:v,to:w,slides:function(){const e=[];for(let t=v;t<=w;t+=1)e.push(c[t]);return e}()}),void(e.params.virtual.renderExternalUpdate&&x());const y=[],E=[];if(t)e.$wrapperEl.find(`.${e.params.slideClass}`).remove();else for(let t=o;t<=d;t+=1)(tw)&&e.$wrapperEl.find(`.${e.params.slideClass}[data-swiper-slide-index="${t}"]`).remove();for(let e=0;e=v&&e<=w&&(void 0===d||t?E.push(e):(e>d&&E.push(e),e{e.$wrapperEl.append(i(c[t],t))})),y.sort(((e,t)=>t-e)).forEach((t=>{e.$wrapperEl.prepend(i(c[t],t))})),e.$wrapperEl.children(".swiper-slide").css(m,`${b}px`),x()}t({virtual:{enabled:!1,slides:[],cache:!0,renderSlide:null,renderExternal:null,renderExternalUpdate:!0,addSlidesBefore:0,addSlidesAfter:0}}),e.virtual={cache:{},from:void 0,to:void 
0,slides:[],offset:0,slidesGrid:[]},s("beforeInit",(()=>{e.params.virtual.enabled&&(e.virtual.slides=e.params.virtual.slides,e.classNames.push(`${e.params.containerModifierClass}virtual`),e.params.watchSlidesProgress=!0,e.originalParams.watchSlidesProgress=!0,e.params.initialSlide||r())})),s("setTranslate",(()=>{e.params.virtual.enabled&&(e.params.cssMode&&!e._immediateVirtual?(clearTimeout(a),a=setTimeout((()=>{r()}),100)):r())})),s("init update resize",(()=>{e.params.virtual.enabled&&e.params.cssMode&&g(e.wrapperEl,"--swiper-virtual-size",`${e.virtualSize}px`)})),Object.assign(e.virtual,{appendSlide:function(t){if("object"==typeof t&&"length"in t)for(let s=0;s{const a=t[e],r=a.attr("data-swiper-slide-index");r&&a.attr("data-swiper-slide-index",parseInt(r,10)+i),s[parseInt(e,10)+i]=a})),e.virtual.cache=s}r(!0),e.slideTo(a,0)},removeSlide:function(t){if(null==t)return;let s=e.activeIndex;if(Array.isArray(t))for(let a=t.length-1;a>=0;a-=1)e.virtual.slides.splice(t[a],1),e.params.virtual.cache&&delete e.virtual.cache[t[a]],t[a]0&&0===e.$el.parents(`.${e.params.slideActiveClass}`).length)return;const a=e.$el,i=a[0].clientWidth,r=a[0].clientHeight,n=l.innerWidth,o=l.innerHeight,d=e.$el.offset();s&&(d.left-=e.$el[0].scrollLeft);const c=[[d.left,d.top],[d.left+i,d.top],[d.left,d.top+r],[d.left+i,d.top+r]];for(let e=0;e=0&&s[0]<=n&&s[1]>=0&&s[1]<=o){if(0===s[0]&&0===s[1])continue;t=!0}}if(!t)return}e.isHorizontal()?((d||c||p||u)&&(a.preventDefault?a.preventDefault():a.returnValue=!1),((c||u)&&!s||(d||p)&&s)&&e.slideNext(),((d||p)&&!s||(c||u)&&s)&&e.slidePrev()):((d||c||h||m)&&(a.preventDefault?a.preventDefault():a.returnValue=!1),(c||m)&&e.slideNext(),(d||h)&&e.slidePrev()),i("keyPress",r)}}function c(){e.keyboard.enabled||(d(n).on("keydown",o),e.keyboard.enabled=!0)}function 
p(){e.keyboard.enabled&&(d(n).off("keydown",o),e.keyboard.enabled=!1)}e.keyboard={enabled:!1},t({keyboard:{enabled:!1,onlyInViewport:!0,pageUpDown:!0}}),s("init",(()=>{e.params.keyboard.enabled&&c()})),s("destroy",(()=>{e.keyboard.enabled&&p()})),Object.assign(e.keyboard,{enable:c,disable:p})},function({swiper:e,extendParams:t,on:s,emit:a}){const i=r();let n;t({mousewheel:{enabled:!1,releaseOnEdges:!1,invert:!1,forceToAxis:!1,sensitivity:1,eventsTarget:"container",thresholdDelta:null,thresholdTime:null}}),e.mousewheel={enabled:!1};let l,o=u();const c=[];function h(){e.enabled&&(e.mouseEntered=!0)}function m(){e.enabled&&(e.mouseEntered=!1)}function f(t){return!(e.params.mousewheel.thresholdDelta&&t.delta=6&&u()-o<60||(t.direction<0?e.isEnd&&!e.params.loop||e.animating||(e.slideNext(),a("scroll",t.raw)):e.isBeginning&&!e.params.loop||e.animating||(e.slidePrev(),a("scroll",t.raw)),o=(new i.Date).getTime(),!1)))}function g(t){let s=t,i=!0;if(!e.enabled)return;const r=e.params.mousewheel;e.params.cssMode&&s.preventDefault();let o=e.$el;if("container"!==e.params.mousewheel.eventsTarget&&(o=d(e.params.mousewheel.eventsTarget)),!e.mouseEntered&&!o[0].contains(s.target)&&!r.releaseOnEdges)return!0;s.originalEvent&&(s=s.originalEvent);let h=0;const m=e.rtlTranslate?-1:1,g=function(e){let t=0,s=0,a=0,i=0;return"detail"in e&&(s=e.detail),"wheelDelta"in e&&(s=-e.wheelDelta/120),"wheelDeltaY"in e&&(s=-e.wheelDeltaY/120),"wheelDeltaX"in e&&(t=-e.wheelDeltaX/120),"axis"in e&&e.axis===e.HORIZONTAL_AXIS&&(t=s,s=0),a=10*t,i=10*s,"deltaY"in e&&(i=e.deltaY),"deltaX"in e&&(a=e.deltaX),e.shiftKey&&!a&&(a=i,i=0),(a||i)&&e.deltaMode&&(1===e.deltaMode?(a*=40,i*=40):(a*=800,i*=800)),a&&!t&&(t=a<1?-1:1),i&&!s&&(s=i<1?-1:1),{spinX:t,spinY:s,pixelX:a,pixelY:i}}(s);if(r.forceToAxis)if(e.isHorizontal()){if(!(Math.abs(g.pixelX)>Math.abs(g.pixelY)))return!0;h=-g.pixelX*m}else{if(!(Math.abs(g.pixelY)>Math.abs(g.pixelX)))return!0;h=-g.pixelY}else 
h=Math.abs(g.pixelX)>Math.abs(g.pixelY)?-g.pixelX*m:-g.pixelY;if(0===h)return!0;r.invert&&(h=-h);let v=e.getTranslate()+h*r.sensitivity;if(v>=e.minTranslate()&&(v=e.minTranslate()),v<=e.maxTranslate()&&(v=e.maxTranslate()),i=!!e.params.loop||!(v===e.minTranslate()||v===e.maxTranslate()),i&&e.params.nested&&s.stopPropagation(),e.params.freeMode&&e.params.freeMode.enabled){const t={time:u(),delta:Math.abs(h),direction:Math.sign(h)},i=l&&t.time=e.minTranslate()&&(o=e.minTranslate()),o<=e.maxTranslate()&&(o=e.maxTranslate()),e.setTransition(0),e.setTranslate(o),e.updateProgress(),e.updateActiveIndex(),e.updateSlidesClasses(),(!d&&e.isBeginning||!u&&e.isEnd)&&e.updateSlidesClasses(),e.params.freeMode.sticky){clearTimeout(n),n=void 0,c.length>=15&&c.shift();const s=c.length?c[c.length-1]:void 0,a=c[0];if(c.push(t),s&&(t.delta>s.delta||t.direction!==s.direction))c.splice(0);else if(c.length>=15&&t.time-a.time<500&&a.delta-t.delta>=1&&t.delta<=6){const s=h>0?.8:.2;l=t,c.splice(0),n=p((()=>{e.slideToClosest(e.params.speed,!0,void 0,s)}),0)}n||(n=p((()=>{l=t,c.splice(0),e.slideToClosest(e.params.speed,!0,void 0,.5)}),500))}if(i||a("scroll",s),e.params.autoplay&&e.params.autoplayDisableOnInteraction&&e.autoplay.stop(),o===e.minTranslate()||o===e.maxTranslate())return!0}}else{const s={time:u(),delta:Math.abs(h),direction:Math.sign(h),raw:t};c.length>=2&&c.shift();const a=c.length?c[c.length-1]:void 0;if(c.push(s),a?(s.direction!==a.direction||s.delta>a.delta||s.time>a.time+150)&&f(s):f(s),function(t){const s=e.params.mousewheel;if(t.direction<0){if(e.isEnd&&!e.params.loop&&s.releaseOnEdges)return!0}else if(e.isBeginning&&!e.params.loop&&s.releaseOnEdges)return!0;return!1}(s))return!0}return s.preventDefault?s.preventDefault():s.returnValue=!1,!1}function v(t){let s=e.$el;"container"!==e.params.mousewheel.eventsTarget&&(s=d(e.params.mousewheel.eventsTarget)),s[t]("mouseenter",h),s[t]("mouseleave",m),s[t]("wheel",g)}function w(){return 
e.params.cssMode?(e.wrapperEl.removeEventListener("wheel",g),!0):!e.mousewheel.enabled&&(v("on"),e.mousewheel.enabled=!0,!0)}function b(){return e.params.cssMode?(e.wrapperEl.addEventListener(event,g),!0):!!e.mousewheel.enabled&&(v("off"),e.mousewheel.enabled=!1,!0)}s("init",(()=>{!e.params.mousewheel.enabled&&e.params.cssMode&&b(),e.params.mousewheel.enabled&&w()})),s("destroy",(()=>{e.params.cssMode&&w(),e.mousewheel.enabled&&b()})),Object.assign(e.mousewheel,{enable:w,disable:b})},function({swiper:e,extendParams:t,on:s,emit:a}){function i(t){let s;return t&&(s=d(t),e.params.uniqueNavElements&&"string"==typeof t&&s.length>1&&1===e.$el.find(t).length&&(s=e.$el.find(t))),s}function r(t,s){const a=e.params.navigation;t&&t.length>0&&(t[s?"addClass":"removeClass"](a.disabledClass),t[0]&&"BUTTON"===t[0].tagName&&(t[0].disabled=s),e.params.watchOverflow&&e.enabled&&t[e.isLocked?"addClass":"removeClass"](a.lockClass))}function n(){if(e.params.loop)return;const{$nextEl:t,$prevEl:s}=e.navigation;r(s,e.isBeginning),r(t,e.isEnd)}function l(t){t.preventDefault(),e.isBeginning&&!e.params.loop||e.slidePrev()}function o(t){t.preventDefault(),e.isEnd&&!e.params.loop||e.slideNext()}function c(){const t=e.params.navigation;if(e.params.navigation=Y(e,e.originalParams.navigation,e.params.navigation,{nextEl:"swiper-button-next",prevEl:"swiper-button-prev"}),!t.nextEl&&!t.prevEl)return;const s=i(t.nextEl),a=i(t.prevEl);s&&s.length>0&&s.on("click",o),a&&a.length>0&&a.on("click",l),Object.assign(e.navigation,{$nextEl:s,nextEl:s&&s[0],$prevEl:a,prevEl:a&&a[0]}),e.enabled||(s&&s.addClass(t.lockClass),a&&a.addClass(t.lockClass))}function 
p(){const{$nextEl:t,$prevEl:s}=e.navigation;t&&t.length&&(t.off("click",o),t.removeClass(e.params.navigation.disabledClass)),s&&s.length&&(s.off("click",l),s.removeClass(e.params.navigation.disabledClass))}t({navigation:{nextEl:null,prevEl:null,hideOnClick:!1,disabledClass:"swiper-button-disabled",hiddenClass:"swiper-button-hidden",lockClass:"swiper-button-lock"}}),e.navigation={nextEl:null,$nextEl:null,prevEl:null,$prevEl:null},s("init",(()=>{c(),n()})),s("toEdge fromEdge lock unlock",(()=>{n()})),s("destroy",(()=>{p()})),s("enable disable",(()=>{const{$nextEl:t,$prevEl:s}=e.navigation;t&&t[e.enabled?"removeClass":"addClass"](e.params.navigation.lockClass),s&&s[e.enabled?"removeClass":"addClass"](e.params.navigation.lockClass)})),s("click",((t,s)=>{const{$nextEl:i,$prevEl:r}=e.navigation,n=s.target;if(e.params.navigation.hideOnClick&&!d(n).is(r)&&!d(n).is(i)){if(e.pagination&&e.params.pagination&&e.params.pagination.clickable&&(e.pagination.el===n||e.pagination.el.contains(n)))return;let t;i?t=i.hasClass(e.params.navigation.hiddenClass):r&&(t=r.hasClass(e.params.navigation.hiddenClass)),a(!0===t?"navigationShow":"navigationHide"),i&&i.toggleClass(e.params.navigation.hiddenClass),r&&r.toggleClass(e.params.navigation.hiddenClass)}})),Object.assign(e.navigation,{update:n,init:c,destroy:p})},function({swiper:e,extendParams:t,on:s,emit:a}){const i="swiper-pagination";let 
r;t({pagination:{el:null,bulletElement:"span",clickable:!1,hideOnClick:!1,renderBullet:null,renderProgressbar:null,renderFraction:null,renderCustom:null,progressbarOpposite:!1,type:"bullets",dynamicBullets:!1,dynamicMainBullets:1,formatFractionCurrent:e=>e,formatFractionTotal:e=>e,bulletClass:`${i}-bullet`,bulletActiveClass:`${i}-bullet-active`,modifierClass:`${i}-`,currentClass:`${i}-current`,totalClass:`${i}-total`,hiddenClass:`${i}-hidden`,progressbarFillClass:`${i}-progressbar-fill`,progressbarOppositeClass:`${i}-progressbar-opposite`,clickableClass:`${i}-clickable`,lockClass:`${i}-lock`,horizontalClass:`${i}-horizontal`,verticalClass:`${i}-vertical`}}),e.pagination={el:null,$el:null,bullets:[]};let n=0;function l(){return!e.params.pagination.el||!e.pagination.el||!e.pagination.$el||0===e.pagination.$el.length}function o(t,s){const{bulletActiveClass:a}=e.params.pagination;t[s]().addClass(`${a}-${s}`)[s]().addClass(`${a}-${s}-${s}`)}function c(){const t=e.rtl,s=e.params.pagination;if(l())return;const i=e.virtual&&e.params.virtual.enabled?e.virtual.slides.length:e.slides.length,c=e.pagination.$el;let p;const u=e.params.loop?Math.ceil((i-2*e.loopedSlides)/e.params.slidesPerGroup):e.snapGrid.length;if(e.params.loop?(p=Math.ceil((e.activeIndex-e.loopedSlides)/e.params.slidesPerGroup),p>i-1-2*e.loopedSlides&&(p-=i-2*e.loopedSlides),p>u-1&&(p-=u),p<0&&"bullets"!==e.params.paginationType&&(p=u+p)):p=void 0!==e.snapIndex?e.snapIndex:e.activeIndex||0,"bullets"===s.type&&e.pagination.bullets&&e.pagination.bullets.length>0){const a=e.pagination.bullets;let i,l,u;if(s.dynamicBullets&&(r=a.eq(0)[e.isHorizontal()?"outerWidth":"outerHeight"](!0),c.css(e.isHorizontal()?"width":"height",r*(s.dynamicMainBullets+4)+"px"),s.dynamicMainBullets>1&&void 
0!==e.previousIndex&&(n+=p-e.previousIndex,n>s.dynamicMainBullets-1?n=s.dynamicMainBullets-1:n<0&&(n=0)),i=p-n,l=i+(Math.min(a.length,s.dynamicMainBullets)-1),u=(l+i)/2),a.removeClass(["","-next","-next-next","-prev","-prev-prev","-main"].map((e=>`${s.bulletActiveClass}${e}`)).join(" ")),c.length>1)a.each((e=>{const t=d(e),a=t.index();a===p&&t.addClass(s.bulletActiveClass),s.dynamicBullets&&(a>=i&&a<=l&&t.addClass(`${s.bulletActiveClass}-main`),a===i&&o(t,"prev"),a===l&&o(t,"next"))}));else{const t=a.eq(p),r=t.index();if(t.addClass(s.bulletActiveClass),s.dynamicBullets){const t=a.eq(i),n=a.eq(l);for(let e=i;e<=l;e+=1)a.eq(e).addClass(`${s.bulletActiveClass}-main`);if(e.params.loop)if(r>=a.length-s.dynamicMainBullets){for(let e=s.dynamicMainBullets;e>=0;e-=1)a.eq(a.length-e).addClass(`${s.bulletActiveClass}-main`);a.eq(a.length-s.dynamicMainBullets-1).addClass(`${s.bulletActiveClass}-prev`)}else o(t,"prev"),o(n,"next");else o(t,"prev"),o(n,"next")}}if(s.dynamicBullets){const i=Math.min(a.length,s.dynamicMainBullets+4),n=(r*i-r)/2-u*r,l=t?"right":"left";a.css(e.isHorizontal()?l:"top",`${n}px`)}}if("fraction"===s.type&&(c.find(W(s.currentClass)).text(s.formatFractionCurrent(p+1)),c.find(W(s.totalClass)).text(s.formatFractionTotal(u))),"progressbar"===s.type){let t;t=s.progressbarOpposite?e.isHorizontal()?"vertical":"horizontal":e.isHorizontal()?"horizontal":"vertical";const a=(p+1)/u;let i=1,r=1;"horizontal"===t?i=a:r=a,c.find(W(s.progressbarFillClass)).transform(`translate3d(0,0,0) scaleX(${i}) scaleY(${r})`).transition(e.params.speed)}"custom"===s.type&&s.renderCustom?(c.html(s.renderCustom(e,p+1,u)),a("paginationRender",c[0])):a("paginationUpdate",c[0]),e.params.watchOverflow&&e.enabled&&c[e.isLocked?"addClass":"removeClass"](s.lockClass)}function p(){const t=e.params.pagination;if(l())return;const s=e.virtual&&e.params.virtual.enabled?e.virtual.slides.length:e.slides.length,i=e.pagination.$el;let r="";if("bullets"===t.type){let 
a=e.params.loop?Math.ceil((s-2*e.loopedSlides)/e.params.slidesPerGroup):e.snapGrid.length;e.params.freeMode&&e.params.freeMode.enabled&&!e.params.loop&&a>s&&(a=s);for(let s=0;s`;i.html(r),e.pagination.bullets=i.find(W(t.bulletClass))}"fraction"===t.type&&(r=t.renderFraction?t.renderFraction.call(e,t.currentClass,t.totalClass):` / `,i.html(r)),"progressbar"===t.type&&(r=t.renderProgressbar?t.renderProgressbar.call(e,t.progressbarFillClass):``,i.html(r)),"custom"!==t.type&&a("paginationRender",e.pagination.$el[0])}function u(){e.params.pagination=Y(e,e.originalParams.pagination,e.params.pagination,{el:"swiper-pagination"});const t=e.params.pagination;if(!t.el)return;let s=d(t.el);0!==s.length&&(e.params.uniqueNavElements&&"string"==typeof t.el&&s.length>1&&(s=e.$el.find(t.el),s.length>1&&(s=s.filter((t=>d(t).parents(".swiper")[0]===e.el)))),"bullets"===t.type&&t.clickable&&s.addClass(t.clickableClass),s.addClass(t.modifierClass+t.type),s.addClass(t.modifierClass+e.params.direction),"bullets"===t.type&&t.dynamicBullets&&(s.addClass(`${t.modifierClass}${t.type}-dynamic`),n=0,t.dynamicMainBullets<1&&(t.dynamicMainBullets=1)),"progressbar"===t.type&&t.progressbarOpposite&&s.addClass(t.progressbarOppositeClass),t.clickable&&s.on("click",W(t.bulletClass),(function(t){t.preventDefault();let s=d(this).index()*e.params.slidesPerGroup;e.params.loop&&(s+=e.loopedSlides),e.slideTo(s)})),Object.assign(e.pagination,{$el:s,el:s[0]}),e.enabled||s.addClass(t.lockClass))}function h(){const t=e.params.pagination;if(l())return;const s=e.pagination.$el;s.removeClass(t.hiddenClass),s.removeClass(t.modifierClass+t.type),s.removeClass(t.modifierClass+e.params.direction),e.pagination.bullets&&e.pagination.bullets.removeClass&&e.pagination.bullets.removeClass(t.bulletActiveClass),t.clickable&&s.off("click",W(t.bulletClass))}s("init",(()=>{u(),p(),c()})),s("activeIndexChange",(()=>{(e.params.loop||void 
0===e.snapIndex)&&c()})),s("snapIndexChange",(()=>{e.params.loop||c()})),s("slidesLengthChange",(()=>{e.params.loop&&(p(),c())})),s("snapGridLengthChange",(()=>{e.params.loop||(p(),c())})),s("destroy",(()=>{h()})),s("enable disable",(()=>{const{$el:t}=e.pagination;t&&t[e.enabled?"removeClass":"addClass"](e.params.pagination.lockClass)})),s("lock unlock",(()=>{c()})),s("click",((t,s)=>{const i=s.target,{$el:r}=e.pagination;if(e.params.pagination.el&&e.params.pagination.hideOnClick&&r.length>0&&!d(i).hasClass(e.params.pagination.bulletClass)){if(e.navigation&&(e.navigation.nextEl&&i===e.navigation.nextEl||e.navigation.prevEl&&i===e.navigation.prevEl))return;const t=r.hasClass(e.params.pagination.hiddenClass);a(!0===t?"paginationShow":"paginationHide"),r.toggleClass(e.params.pagination.hiddenClass)}})),Object.assign(e.pagination,{render:p,update:c,init:u,destroy:h})},function({swiper:e,extendParams:t,on:s,emit:i}){const r=a();let n,l,o,c,u=!1,h=null,m=null;function f(){if(!e.params.scrollbar.el||!e.scrollbar.el)return;const{scrollbar:t,rtlTranslate:s,progress:a}=e,{$dragEl:i,$el:r}=t,n=e.params.scrollbar;let d=l,c=(o-l)*a;s?(c=-c,c>0?(d=l-c,c=0):-c+l>o&&(d=o+c)):c<0?(d=l+c,c=0):c+l>o&&(d=o-c),e.isHorizontal()?(i.transform(`translate3d(${c}px, 0, 0)`),i[0].style.width=`${d}px`):(i.transform(`translate3d(0px, ${c}px, 0)`),i[0].style.height=`${d}px`),n.hide&&(clearTimeout(h),r[0].style.opacity=1,h=setTimeout((()=>{r[0].style.opacity=0,r.transition(400)}),1e3))}function 
g(){if(!e.params.scrollbar.el||!e.scrollbar.el)return;const{scrollbar:t}=e,{$dragEl:s,$el:a}=t;s[0].style.width="",s[0].style.height="",o=e.isHorizontal()?a[0].offsetWidth:a[0].offsetHeight,c=e.size/(e.virtualSize+e.params.slidesOffsetBefore-(e.params.centeredSlides?e.snapGrid[0]:0)),l="auto"===e.params.scrollbar.dragSize?o*c:parseInt(e.params.scrollbar.dragSize,10),e.isHorizontal()?s[0].style.width=`${l}px`:s[0].style.height=`${l}px`,a[0].style.display=c>=1?"none":"",e.params.scrollbar.hide&&(a[0].style.opacity=0),e.params.watchOverflow&&e.enabled&&t.$el[e.isLocked?"addClass":"removeClass"](e.params.scrollbar.lockClass)}function v(t){return e.isHorizontal()?"touchstart"===t.type||"touchmove"===t.type?t.targetTouches[0].clientX:t.clientX:"touchstart"===t.type||"touchmove"===t.type?t.targetTouches[0].clientY:t.clientY}function w(t){const{scrollbar:s,rtlTranslate:a}=e,{$el:i}=s;let r;r=(v(t)-i.offset()[e.isHorizontal()?"left":"top"]-(null!==n?n:l/2))/(o-l),r=Math.max(Math.min(r,1),0),a&&(r=1-r);const d=e.minTranslate()+(e.maxTranslate()-e.minTranslate())*r;e.updateProgress(d),e.setTranslate(d),e.updateActiveIndex(),e.updateSlidesClasses()}function b(t){const s=e.params.scrollbar,{scrollbar:a,$wrapperEl:r}=e,{$el:l,$dragEl:o}=a;u=!0,n=t.target===o[0]||t.target===o?v(t)-t.target.getBoundingClientRect()[e.isHorizontal()?"left":"top"]:null,t.preventDefault(),t.stopPropagation(),r.transition(100),o.transition(100),w(t),clearTimeout(m),l.transition(0),s.hide&&l.css("opacity",1),e.params.cssMode&&e.$wrapperEl.css("scroll-snap-type","none"),i("scrollbarDragStart",t)}function x(t){const{scrollbar:s,$wrapperEl:a}=e,{$el:r,$dragEl:n}=s;u&&(t.preventDefault?t.preventDefault():t.returnValue=!1,w(t),a.transition(0),r.transition(0),n.transition(0),i("scrollbarDragMove",t))}function y(t){const 
s=e.params.scrollbar,{scrollbar:a,$wrapperEl:r}=e,{$el:n}=a;u&&(u=!1,e.params.cssMode&&(e.$wrapperEl.css("scroll-snap-type",""),r.transition("")),s.hide&&(clearTimeout(m),m=p((()=>{n.css("opacity",0),n.transition(400)}),1e3)),i("scrollbarDragEnd",t),s.snapOnRelease&&e.slideToClosest())}function E(t){const{scrollbar:s,touchEventsTouch:a,touchEventsDesktop:i,params:n,support:l}=e,o=s.$el[0],d=!(!l.passiveListener||!n.passiveListeners)&&{passive:!1,capture:!1},c=!(!l.passiveListener||!n.passiveListeners)&&{passive:!0,capture:!1};if(!o)return;const p="on"===t?"addEventListener":"removeEventListener";l.touch?(o[p](a.start,b,d),o[p](a.move,x,d),o[p](a.end,y,c)):(o[p](i.start,b,d),r[p](i.move,x,d),r[p](i.end,y,c))}function T(){const{scrollbar:t,$el:s}=e;e.params.scrollbar=Y(e,e.originalParams.scrollbar,e.params.scrollbar,{el:"swiper-scrollbar"});const a=e.params.scrollbar;if(!a.el)return;let i=d(a.el);e.params.uniqueNavElements&&"string"==typeof a.el&&i.length>1&&1===s.find(a.el).length&&(i=s.find(a.el));let r=i.find(`.${e.params.scrollbar.dragClass}`);0===r.length&&(r=d(`
    `),i.append(r)),Object.assign(t,{$el:i,el:i[0],$dragEl:r,dragEl:r[0]}),a.draggable&&e.params.scrollbar.el&&E("on"),i&&i[e.enabled?"removeClass":"addClass"](e.params.scrollbar.lockClass)}function C(){e.params.scrollbar.el&&E("off")}t({scrollbar:{el:null,dragSize:"auto",hide:!1,draggable:!1,snapOnRelease:!0,lockClass:"swiper-scrollbar-lock",dragClass:"swiper-scrollbar-drag"}}),e.scrollbar={el:null,dragEl:null,$el:null,$dragEl:null},s("init",(()=>{T(),g(),f()})),s("update resize observerUpdate lock unlock",(()=>{g()})),s("setTranslate",(()=>{f()})),s("setTransition",((t,s)=>{!function(t){e.params.scrollbar.el&&e.scrollbar.el&&e.scrollbar.$dragEl.transition(t)}(s)})),s("enable disable",(()=>{const{$el:t}=e.scrollbar;t&&t[e.enabled?"removeClass":"addClass"](e.params.scrollbar.lockClass)})),s("destroy",(()=>{C()})),Object.assign(e.scrollbar,{updateSize:g,setTranslate:f,init:T,destroy:C})},function({swiper:e,extendParams:t,on:s}){t({parallax:{enabled:!1}});const a=(t,s)=>{const{rtl:a}=e,i=d(t),r=a?-1:1,n=i.attr("data-swiper-parallax")||"0";let l=i.attr("data-swiper-parallax-x"),o=i.attr("data-swiper-parallax-y");const c=i.attr("data-swiper-parallax-scale"),p=i.attr("data-swiper-parallax-opacity");if(l||o?(l=l||"0",o=o||"0"):e.isHorizontal()?(l=n,o="0"):(o=n,l="0"),l=l.indexOf("%")>=0?parseInt(l,10)*s*r+"%":l*s*r+"px",o=o.indexOf("%")>=0?parseInt(o,10)*s+"%":o*s+"px",null!=p){const e=p-(p-1)*(1-Math.abs(s));i[0].style.opacity=e}if(null==c)i.transform(`translate3d(${l}, ${o}, 0px)`);else{const e=c-(c-1)*(1-Math.abs(s));i.transform(`translate3d(${l}, ${o}, 0px) scale(${e})`)}},i=()=>{const{$el:t,slides:s,progress:i,snapGrid:r}=e;t.children("[data-swiper-parallax], [data-swiper-parallax-x], [data-swiper-parallax-y], [data-swiper-parallax-opacity], [data-swiper-parallax-scale]").each((e=>{a(e,i)})),s.each(((t,s)=>{let 
n=t.progress;e.params.slidesPerGroup>1&&"auto"!==e.params.slidesPerView&&(n+=Math.ceil(s/2)-i*(r.length-1)),n=Math.min(Math.max(n,-1),1),d(t).find("[data-swiper-parallax], [data-swiper-parallax-x], [data-swiper-parallax-y], [data-swiper-parallax-opacity], [data-swiper-parallax-scale]").each((e=>{a(e,n)}))}))};s("beforeInit",(()=>{e.params.parallax.enabled&&(e.params.watchSlidesProgress=!0,e.originalParams.watchSlidesProgress=!0)})),s("init",(()=>{e.params.parallax.enabled&&i()})),s("setTranslate",(()=>{e.params.parallax.enabled&&i()})),s("setTransition",((t,s)=>{e.params.parallax.enabled&&((t=e.params.speed)=>{const{$el:s}=e;s.find("[data-swiper-parallax], [data-swiper-parallax-x], [data-swiper-parallax-y], [data-swiper-parallax-opacity], [data-swiper-parallax-scale]").each((e=>{const s=d(e);let a=parseInt(s.attr("data-swiper-parallax-duration"),10)||t;0===t&&(a=0),s.transition(a)}))})(s)}))},function({swiper:e,extendParams:t,on:s,emit:a}){const i=r();t({zoom:{enabled:!1,maxRatio:3,minRatio:1,toggle:!0,containerClass:"swiper-zoom-container",zoomedSlideClass:"swiper-slide-zoomed"}}),e.zoom={enabled:!1};let n,l,o,c=1,p=!1;const u={$slideEl:void 0,slideWidth:void 0,slideHeight:void 0,$imageEl:void 0,$imageWrapEl:void 0,maxRatio:3},m={isTouched:void 0,isMoved:void 0,currentX:void 0,currentY:void 0,minX:void 0,minY:void 0,maxX:void 0,maxY:void 0,width:void 0,height:void 0,startX:void 0,startY:void 0,touchesStart:{},touchesCurrent:{}},f={x:void 0,y:void 0,prevPositionX:void 0,prevPositionY:void 0,prevTime:void 0};let g=1;function v(e){if(e.targetTouches.length<2)return 1;const t=e.targetTouches[0].pageX,s=e.targetTouches[0].pageY,a=e.targetTouches[1].pageX,i=e.targetTouches[1].pageY;return Math.sqrt((a-t)**2+(i-s)**2)}function w(t){const 
s=e.support,a=e.params.zoom;if(l=!1,o=!1,!s.gestures){if("touchstart"!==t.type||"touchstart"===t.type&&t.targetTouches.length<2)return;l=!0,u.scaleStart=v(t)}u.$slideEl&&u.$slideEl.length||(u.$slideEl=d(t.target).closest(`.${e.params.slideClass}`),0===u.$slideEl.length&&(u.$slideEl=e.slides.eq(e.activeIndex)),u.$imageEl=u.$slideEl.find(`.${a.containerClass}`).eq(0).find("img, svg, canvas, picture, .swiper-zoom-target"),u.$imageWrapEl=u.$imageEl.parent(`.${a.containerClass}`),u.maxRatio=u.$imageWrapEl.attr("data-swiper-zoom")||a.maxRatio,0!==u.$imageWrapEl.length)?(u.$imageEl&&u.$imageEl.transition(0),p=!0):u.$imageEl=void 0}function b(t){const s=e.support,a=e.params.zoom,i=e.zoom;if(!s.gestures){if("touchmove"!==t.type||"touchmove"===t.type&&t.targetTouches.length<2)return;o=!0,u.scaleMove=v(t)}u.$imageEl&&0!==u.$imageEl.length?(s.gestures?i.scale=t.scale*c:i.scale=u.scaleMove/u.scaleStart*c,i.scale>u.maxRatio&&(i.scale=u.maxRatio-1+(i.scale-u.maxRatio+1)**.5),i.scalem.touchesStart.x))return void(m.isTouched=!1);if(!e.isHorizontal()&&(Math.floor(m.minY)===Math.floor(m.startY)&&m.touchesCurrent.ym.touchesStart.y))return 
void(m.isTouched=!1)}t.cancelable&&t.preventDefault(),t.stopPropagation(),m.isMoved=!0,m.currentX=m.touchesCurrent.x-m.touchesStart.x+m.startX,m.currentY=m.touchesCurrent.y-m.touchesStart.y+m.startY,m.currentXm.maxX&&(m.currentX=m.maxX-1+(m.currentX-m.maxX+1)**.8),m.currentYm.maxY&&(m.currentY=m.maxY-1+(m.currentY-m.maxY+1)**.8),f.prevPositionX||(f.prevPositionX=m.touchesCurrent.x),f.prevPositionY||(f.prevPositionY=m.touchesCurrent.y),f.prevTime||(f.prevTime=Date.now()),f.x=(m.touchesCurrent.x-f.prevPositionX)/(Date.now()-f.prevTime)/2,f.y=(m.touchesCurrent.y-f.prevPositionY)/(Date.now()-f.prevTime)/2,Math.abs(m.touchesCurrent.x-f.prevPositionX)<2&&(f.x=0),Math.abs(m.touchesCurrent.y-f.prevPositionY)<2&&(f.y=0),f.prevPositionX=m.touchesCurrent.x,f.prevPositionY=m.touchesCurrent.y,f.prevTime=Date.now(),u.$imageWrapEl.transform(`translate3d(${m.currentX}px, ${m.currentY}px,0)`)}}function E(){const t=e.zoom;u.$slideEl&&e.previousIndex!==e.activeIndex&&(u.$imageEl&&u.$imageEl.transform("translate3d(0,0,0) scale(1)"),u.$imageWrapEl&&u.$imageWrapEl.transform("translate3d(0,0,0)"),t.scale=1,c=1,u.$slideEl=void 0,u.$imageEl=void 0,u.$imageWrapEl=void 0)}function T(t){const s=e.zoom,a=e.params.zoom;if(u.$slideEl||(t&&t.target&&(u.$slideEl=d(t.target).closest(`.${e.params.slideClass}`)),u.$slideEl||(e.params.virtual&&e.params.virtual.enabled&&e.virtual?u.$slideEl=e.$wrapperEl.children(`.${e.params.slideActiveClass}`):u.$slideEl=e.slides.eq(e.activeIndex)),u.$imageEl=u.$slideEl.find(`.${a.containerClass}`).eq(0).find("img, svg, canvas, picture, .swiper-zoom-target"),u.$imageWrapEl=u.$imageEl.parent(`.${a.containerClass}`)),!u.$imageEl||0===u.$imageEl.length||!u.$imageWrapEl||0===u.$imageWrapEl.length)return;let r,n,l,o,p,h,f,g,v,w,b,x,y,E,T,C,$,S;e.params.cssMode&&(e.wrapperEl.style.overflow="hidden",e.wrapperEl.style.touchAction="none"),u.$slideEl.addClass(`${a.zoomedSlideClass}`),void 
0===m.touchesStart.x&&t?(r="touchend"===t.type?t.changedTouches[0].pageX:t.pageX,n="touchend"===t.type?t.changedTouches[0].pageY:t.pageY):(r=m.touchesStart.x,n=m.touchesStart.y),s.scale=u.$imageWrapEl.attr("data-swiper-zoom")||a.maxRatio,c=u.$imageWrapEl.attr("data-swiper-zoom")||a.maxRatio,t?($=u.$slideEl[0].offsetWidth,S=u.$slideEl[0].offsetHeight,l=u.$slideEl.offset().left+i.scrollX,o=u.$slideEl.offset().top+i.scrollY,p=l+$/2-r,h=o+S/2-n,v=u.$imageEl[0].offsetWidth,w=u.$imageEl[0].offsetHeight,b=v*s.scale,x=w*s.scale,y=Math.min($/2-b/2,0),E=Math.min(S/2-x/2,0),T=-y,C=-E,f=p*s.scale,g=h*s.scale,fT&&(f=T),gC&&(g=C)):(f=0,g=0),u.$imageWrapEl.transition(300).transform(`translate3d(${f}px, ${g}px,0)`),u.$imageEl.transition(300).transform(`translate3d(0,0,0) scale(${s.scale})`)}function C(){const t=e.zoom,s=e.params.zoom;u.$slideEl||(e.params.virtual&&e.params.virtual.enabled&&e.virtual?u.$slideEl=e.$wrapperEl.children(`.${e.params.slideActiveClass}`):u.$slideEl=e.slides.eq(e.activeIndex),u.$imageEl=u.$slideEl.find(`.${s.containerClass}`).eq(0).find("img, svg, canvas, picture, .swiper-zoom-target"),u.$imageWrapEl=u.$imageEl.parent(`.${s.containerClass}`)),u.$imageEl&&0!==u.$imageEl.length&&u.$imageWrapEl&&0!==u.$imageWrapEl.length&&(e.params.cssMode&&(e.wrapperEl.style.overflow="",e.wrapperEl.style.touchAction=""),t.scale=1,c=1,u.$imageWrapEl.transition(300).transform("translate3d(0,0,0)"),u.$imageEl.transition(300).transform("translate3d(0,0,0) scale(1)"),u.$slideEl.removeClass(`${s.zoomedSlideClass}`),u.$slideEl=void 0)}function $(t){const s=e.zoom;s.scale&&1!==s.scale?C():T(t)}function S(){const t=e.support;return{passiveListener:!("touchstart"!==e.touchEvents.start||!t.passiveListener||!e.params.passiveListeners)&&{passive:!0,capture:!1},activeListenerWithCapture:!t.passiveListener||{passive:!1,capture:!0}}}function M(){return`.${e.params.slideClass}`}function 
P(t){const{passiveListener:s}=S(),a=M();e.$wrapperEl[t]("gesturestart",a,w,s),e.$wrapperEl[t]("gesturechange",a,b,s),e.$wrapperEl[t]("gestureend",a,x,s)}function k(){n||(n=!0,P("on"))}function z(){n&&(n=!1,P("off"))}function O(){const t=e.zoom;if(t.enabled)return;t.enabled=!0;const s=e.support,{passiveListener:a,activeListenerWithCapture:i}=S(),r=M();s.gestures?(e.$wrapperEl.on(e.touchEvents.start,k,a),e.$wrapperEl.on(e.touchEvents.end,z,a)):"touchstart"===e.touchEvents.start&&(e.$wrapperEl.on(e.touchEvents.start,r,w,a),e.$wrapperEl.on(e.touchEvents.move,r,b,i),e.$wrapperEl.on(e.touchEvents.end,r,x,a),e.touchEvents.cancel&&e.$wrapperEl.on(e.touchEvents.cancel,r,x,a)),e.$wrapperEl.on(e.touchEvents.move,`.${e.params.zoom.containerClass}`,y,i)}function I(){const t=e.zoom;if(!t.enabled)return;const s=e.support;t.enabled=!1;const{passiveListener:a,activeListenerWithCapture:i}=S(),r=M();s.gestures?(e.$wrapperEl.off(e.touchEvents.start,k,a),e.$wrapperEl.off(e.touchEvents.end,z,a)):"touchstart"===e.touchEvents.start&&(e.$wrapperEl.off(e.touchEvents.start,r,w,a),e.$wrapperEl.off(e.touchEvents.move,r,b,i),e.$wrapperEl.off(e.touchEvents.end,r,x,a),e.touchEvents.cancel&&e.$wrapperEl.off(e.touchEvents.cancel,r,x,a)),e.$wrapperEl.off(e.touchEvents.move,`.${e.params.zoom.containerClass}`,y,i)}Object.defineProperty(e.zoom,"scale",{get:()=>g,set(e){if(g!==e){const t=u.$imageEl?u.$imageEl[0]:void 0,s=u.$slideEl?u.$slideEl[0]:void 0;a("zoomChange",e,t,s)}g=e}}),s("init",(()=>{e.params.zoom.enabled&&O()})),s("destroy",(()=>{I()})),s("touchStart",((t,s)=>{e.zoom.enabled&&function(t){const s=e.device;u.$imageEl&&0!==u.$imageEl.length&&(m.isTouched||(s.android&&t.cancelable&&t.preventDefault(),m.isTouched=!0,m.touchesStart.x="touchstart"===t.type?t.targetTouches[0].pageX:t.pageX,m.touchesStart.y="touchstart"===t.type?t.targetTouches[0].pageY:t.pageY))}(s)})),s("touchEnd",((t,s)=>{e.zoom.enabled&&function(){const 
t=e.zoom;if(!u.$imageEl||0===u.$imageEl.length)return;if(!m.isTouched||!m.isMoved)return m.isTouched=!1,void(m.isMoved=!1);m.isTouched=!1,m.isMoved=!1;let s=300,a=300;const i=f.x*s,r=m.currentX+i,n=f.y*a,l=m.currentY+n;0!==f.x&&(s=Math.abs((r-m.currentX)/f.x)),0!==f.y&&(a=Math.abs((l-m.currentY)/f.y));const o=Math.max(s,a);m.currentX=r,m.currentY=l;const d=m.width*t.scale,c=m.height*t.scale;m.minX=Math.min(u.slideWidth/2-d/2,0),m.maxX=-m.minX,m.minY=Math.min(u.slideHeight/2-c/2,0),m.maxY=-m.minY,m.currentX=Math.max(Math.min(m.currentX,m.maxX),m.minX),m.currentY=Math.max(Math.min(m.currentY,m.maxY),m.minY),u.$imageWrapEl.transition(o).transform(`translate3d(${m.currentX}px, ${m.currentY}px,0)`)}()})),s("doubleTap",((t,s)=>{!e.animating&&e.params.zoom.enabled&&e.zoom.enabled&&e.params.zoom.toggle&&$(s)})),s("transitionEnd",(()=>{e.zoom.enabled&&e.params.zoom.enabled&&E()})),s("slideChange",(()=>{e.zoom.enabled&&e.params.zoom.enabled&&e.params.cssMode&&E()})),Object.assign(e.zoom,{enable:O,disable:I,in:T,out:C,toggle:$})},function({swiper:e,extendParams:t,on:s,emit:a}){t({lazy:{checkInView:!1,enabled:!1,loadPrevNext:!1,loadPrevNextAmount:1,loadOnTransitionStart:!1,scrollingElement:"",elementClass:"swiper-lazy",loadingClass:"swiper-lazy-loading",loadedClass:"swiper-lazy-loaded",preloaderClass:"swiper-lazy-preloader"}}),e.lazy={};let i=!1,n=!1;function l(t,s=!0){const i=e.params.lazy;if(void 0===t)return;if(0===e.slides.length)return;const r=e.virtual&&e.params.virtual.enabled?e.$wrapperEl.children(`.${e.params.slideClass}[data-swiper-slide-index="${t}"]`):e.slides.eq(t),n=r.find(`.${i.elementClass}:not(.${i.loadedClass}):not(.${i.loadingClass})`);!r.hasClass(i.elementClass)||r.hasClass(i.loadedClass)||r.hasClass(i.loadingClass)||n.push(r[0]),0!==n.length&&n.each((t=>{const n=d(t);n.addClass(i.loadingClass);const 
o=n.attr("data-background"),c=n.attr("data-src"),p=n.attr("data-srcset"),u=n.attr("data-sizes"),h=n.parent("picture");e.loadImage(n[0],c||o,p,u,!1,(()=>{if(null!=e&&e&&(!e||e.params)&&!e.destroyed){if(o?(n.css("background-image",`url("${o}")`),n.removeAttr("data-background")):(p&&(n.attr("srcset",p),n.removeAttr("data-srcset")),u&&(n.attr("sizes",u),n.removeAttr("data-sizes")),h.length&&h.children("source").each((e=>{const t=d(e);t.attr("data-srcset")&&(t.attr("srcset",t.attr("data-srcset")),t.removeAttr("data-srcset"))})),c&&(n.attr("src",c),n.removeAttr("data-src"))),n.addClass(i.loadedClass).removeClass(i.loadingClass),r.find(`.${i.preloaderClass}`).remove(),e.params.loop&&s){const t=r.attr("data-swiper-slide-index");if(r.hasClass(e.params.slideDuplicateClass)){l(e.$wrapperEl.children(`[data-swiper-slide-index="${t}"]:not(.${e.params.slideDuplicateClass})`).index(),!1)}else{l(e.$wrapperEl.children(`.${e.params.slideDuplicateClass}[data-swiper-slide-index="${t}"]`).index(),!1)}}a("lazyImageReady",r[0],n[0]),e.params.autoHeight&&e.updateAutoHeight()}})),a("lazyImageLoad",r[0],n[0])}))}function o(){const{$wrapperEl:t,params:s,slides:a,activeIndex:i}=e,r=e.virtual&&s.virtual.enabled,o=s.lazy;let c=s.slidesPerView;function p(e){if(r){if(t.children(`.${s.slideClass}[data-swiper-slide-index="${e}"]`).length)return!0}else if(a[e])return!0;return!1}function u(e){return r?d(e).attr("data-swiper-slide-index"):d(e).index()}if("auto"===c&&(c=0),n||(n=!0),e.params.watchSlidesProgress)t.children(`.${s.slideVisibleClass}`).each((e=>{l(r?d(e).attr("data-swiper-slide-index"):d(e).index())}));else if(c>1)for(let e=i;e1||o.loadPrevNextAmount&&o.loadPrevNextAmount>1){const e=o.loadPrevNextAmount,t=c,s=Math.min(i+t+Math.max(e,t),a.length),r=Math.max(i-Math.max(t,e),0);for(let e=i+c;e0&&l(u(e));const a=t.children(`.${s.slidePrevClass}`);a.length>0&&l(u(a))}}function c(){const t=r();if(!e||e.destroyed)return;const 
s=e.params.lazy.scrollingElement?d(e.params.lazy.scrollingElement):d(t),a=s[0]===t,n=a?t.innerWidth:s[0].offsetWidth,l=a?t.innerHeight:s[0].offsetHeight,p=e.$el.offset(),{rtlTranslate:u}=e;let h=!1;u&&(p.left-=e.$el[0].scrollLeft);const m=[[p.left,p.top],[p.left+e.width,p.top],[p.left,p.top+e.height],[p.left+e.width,p.top+e.height]];for(let e=0;e=0&&t[0]<=n&&t[1]>=0&&t[1]<=l){if(0===t[0]&&0===t[1])continue;h=!0}}const f=!("touchstart"!==e.touchEvents.start||!e.support.passiveListener||!e.params.passiveListeners)&&{passive:!0,capture:!1};h?(o(),s.off("scroll",c,f)):i||(i=!0,s.on("scroll",c,f))}s("beforeInit",(()=>{e.params.lazy.enabled&&e.params.preloadImages&&(e.params.preloadImages=!1)})),s("init",(()=>{e.params.lazy.enabled&&(e.params.lazy.checkInView?c():o())})),s("scroll",(()=>{e.params.freeMode&&e.params.freeMode.enabled&&!e.params.freeMode.sticky&&o()})),s("scrollbarDragMove resize _freeModeNoMomentumRelease",(()=>{e.params.lazy.enabled&&(e.params.lazy.checkInView?c():o())})),s("transitionStart",(()=>{e.params.lazy.enabled&&(e.params.lazy.loadOnTransitionStart||!e.params.lazy.loadOnTransitionStart&&!n)&&(e.params.lazy.checkInView?c():o())})),s("transitionEnd",(()=>{e.params.lazy.enabled&&!e.params.lazy.loadOnTransitionStart&&(e.params.lazy.checkInView?c():o())})),s("slideChange",(()=>{const{lazy:t,cssMode:s,watchSlidesProgress:a,touchReleaseOnEdges:i,resistanceRatio:r}=e.params;t.enabled&&(s||a&&(i||0===r))&&o()})),Object.assign(e.lazy,{load:o,loadInSlide:l})},function({swiper:e,extendParams:t,on:s}){function a(e,t){const s=function(){let e,t,s;return(a,i)=>{for(t=-1,e=a.length;e-t>1;)s=e+t>>1,a[s]<=i?t=s:e=s;return e}}();let a,i;return this.x=e,this.y=t,this.lastIndex=e.length-1,this.interpolate=function(e){return e?(i=s(this.x,e),a=i-1,(e-this.x[a])*(this.y[i]-this.y[a])/(this.x[i]-this.x[a])+this.y[a]):0},this}function i(){e.controller.control&&e.controller.spline&&(e.controller.spline=void 0,delete e.controller.spline)}t({controller:{control:void 
0,inverse:!1,by:"slide"}}),e.controller={control:void 0},s("beforeInit",(()=>{e.controller.control=e.params.controller.control})),s("update",(()=>{i()})),s("resize",(()=>{i()})),s("observerUpdate",(()=>{i()})),s("setTranslate",((t,s,a)=>{e.controller.control&&e.controller.setTranslate(s,a)})),s("setTransition",((t,s,a)=>{e.controller.control&&e.controller.setTransition(s,a)})),Object.assign(e.controller,{setTranslate:function(t,s){const i=e.controller.control;let r,n;const l=e.constructor;function o(t){const s=e.rtlTranslate?-e.translate:e.translate;"slide"===e.params.controller.by&&(!function(t){e.controller.spline||(e.controller.spline=e.params.loop?new a(e.slidesGrid,t.slidesGrid):new a(e.snapGrid,t.snapGrid))}(t),n=-e.controller.spline.interpolate(-s)),n&&"container"!==e.params.controller.by||(r=(t.maxTranslate()-t.minTranslate())/(e.maxTranslate()-e.minTranslate()),n=(s-e.minTranslate())*r+t.minTranslate()),e.params.controller.inverse&&(n=t.maxTranslate()-n),t.updateProgress(n),t.setTranslate(n,e),t.updateActiveIndex(),t.updateSlidesClasses()}if(Array.isArray(i))for(let e=0;e{s.updateAutoHeight()})),s.$wrapperEl.transitionEnd((()=>{i&&(s.params.loop&&"slide"===e.params.controller.by&&s.loopFix(),s.transitionEnd())})))}if(Array.isArray(i))for(r=0;r0&&(e.isBeginning?(p(s),n(s)):(u(s),r(s))),t&&t.length>0&&(e.isEnd?(p(t),n(t)):(u(t),r(t)))}function f(){return e.pagination&&e.params.pagination.clickable&&e.pagination.bullets&&e.pagination.bullets.length}const g=(e,t,s)=>{r(e),"BUTTON"!==e[0].tagName&&(l(e,"button"),e.on("keydown",h)),c(e,s),function(e,t){e.attr("aria-controls",t)}(e,t)};function v(){const t=e.params.a11y;e.$el.append(a);const s=e.$el;t.containerRoleDescriptionMessage&&o(s,t.containerRoleDescriptionMessage),t.containerMessage&&c(s,t.containerMessage);const 
i=e.$wrapperEl,r=i.attr("id")||`swiper-wrapper-${function(e=16){return"x".repeat(e).replace(/x/g,(()=>Math.round(16*Math.random()).toString(16)))}(16)}`,n=e.params.autoplay&&e.params.autoplay.enabled?"off":"polite";var p;p=r,i.attr("id",p),function(e,t){e.attr("aria-live",t)}(i,n),t.itemRoleDescriptionMessage&&o(d(e.slides),t.itemRoleDescriptionMessage),l(d(e.slides),t.slideRole);const u=e.params.loop?e.slides.filter((t=>!t.classList.contains(e.params.slideDuplicateClass))).length:e.slides.length;let m,v;e.slides.each(((s,a)=>{const i=d(s),r=e.params.loop?parseInt(i.attr("data-swiper-slide-index"),10):a;c(i,t.slideLabelMessage.replace(/\{\{index\}\}/,r+1).replace(/\{\{slidesLength\}\}/,u))})),e.navigation&&e.navigation.$nextEl&&(m=e.navigation.$nextEl),e.navigation&&e.navigation.$prevEl&&(v=e.navigation.$prevEl),m&&m.length&&g(m,r,t.nextSlideMessage),v&&v.length&&g(v,r,t.prevSlideMessage),f()&&e.pagination.$el.on("keydown",W(e.params.pagination.bulletClass),h)}s("beforeInit",(()=>{a=d(``)})),s("afterInit",(()=>{e.params.a11y.enabled&&(v(),m())})),s("toEdge",(()=>{e.params.a11y.enabled&&m()})),s("fromEdge",(()=>{e.params.a11y.enabled&&m()})),s("paginationUpdate",(()=>{e.params.a11y.enabled&&function(){const t=e.params.a11y;f()&&e.pagination.bullets.each((s=>{const a=d(s);r(a),e.params.pagination.renderBullet||(l(a,"button"),c(a,t.paginationBulletMessage.replace(/\{\{index\}\}/,a.index()+1)))}))}()})),s("destroy",(()=>{e.params.a11y.enabled&&function(){let t,s;a&&a.length>0&&a.remove(),e.navigation&&e.navigation.$nextEl&&(t=e.navigation.$nextEl),e.navigation&&e.navigation.$prevEl&&(s=e.navigation.$prevEl),t&&t.off("keydown",h),s&&s.off("keydown",h),f()&&e.pagination.$el.off("keydown",W(e.params.pagination.bulletClass),h)}()}))},function({swiper:e,extendParams:t,on:s}){t({history:{enabled:!1,root:"",replaceState:!1,key:"slides"}});let a=!1,i={};const 
n=e=>e.toString().replace(/\s+/g,"-").replace(/[^\w-]+/g,"").replace(/--+/g,"-").replace(/^-+/,"").replace(/-+$/,""),l=e=>{const t=r();let s;s=e?new URL(e):t.location;const a=s.pathname.slice(1).split("/").filter((e=>""!==e)),i=a.length;return{key:a[i-2],value:a[i-1]}},o=(t,s)=>{const i=r();if(!a||!e.params.history.enabled)return;let l;l=e.params.url?new URL(e.params.url):i.location;const o=e.slides.eq(s);let d=n(o.attr("data-history"));if(e.params.history.root.length>0){let s=e.params.history.root;"/"===s[s.length-1]&&(s=s.slice(0,s.length-1)),d=`${s}/${t}/${d}`}else l.pathname.includes(t)||(d=`${t}/${d}`);const c=i.history.state;c&&c.value===d||(e.params.history.replaceState?i.history.replaceState({value:d},null,d):i.history.pushState({value:d},null,d))},d=(t,s,a)=>{if(s)for(let i=0,r=e.slides.length;i{i=l(e.params.url),d(e.params.speed,e.paths.value,!1)};s("init",(()=>{e.params.history.enabled&&(()=>{const t=r();if(e.params.history){if(!t.history||!t.history.pushState)return e.params.history.enabled=!1,void(e.params.hashNavigation.enabled=!0);a=!0,i=l(e.params.url),(i.key||i.value)&&(d(0,i.value,e.params.runCallbacksOnInit),e.params.history.replaceState||t.addEventListener("popstate",c))}})()})),s("destroy",(()=>{e.params.history.enabled&&(()=>{const t=r();e.params.history.replaceState||t.removeEventListener("popstate",c)})()})),s("transitionEnd _freeModeNoMomentumRelease",(()=>{a&&o(e.params.history.key,e.activeIndex)})),s("slideChange",(()=>{a&&e.params.cssMode&&o(e.params.history.key,e.activeIndex)}))},function({swiper:e,extendParams:t,emit:s,on:i}){let n=!1;const l=a(),o=r();t({hashNavigation:{enabled:!1,replaceState:!1,watchState:!1}});const c=()=>{s("hashChange");const t=l.location.hash.replace("#","");if(t!==e.slides.eq(e.activeIndex).attr("data-hash")){const s=e.$wrapperEl.children(`.${e.params.slideClass}[data-hash="${t}"]`).index();if(void 
0===s)return;e.slideTo(s)}},p=()=>{if(n&&e.params.hashNavigation.enabled)if(e.params.hashNavigation.replaceState&&o.history&&o.history.replaceState)o.history.replaceState(null,null,`#${e.slides.eq(e.activeIndex).attr("data-hash")}`||""),s("hashSet");else{const t=e.slides.eq(e.activeIndex),a=t.attr("data-hash")||t.attr("data-history");l.location.hash=a||"",s("hashSet")}};i("init",(()=>{e.params.hashNavigation.enabled&&(()=>{if(!e.params.hashNavigation.enabled||e.params.history&&e.params.history.enabled)return;n=!0;const t=l.location.hash.replace("#","");if(t){const s=0;for(let a=0,i=e.slides.length;a{e.params.hashNavigation.enabled&&e.params.hashNavigation.watchState&&d(o).off("hashchange",c)})),i("transitionEnd _freeModeNoMomentumRelease",(()=>{n&&p()})),i("slideChange",(()=>{n&&e.params.cssMode&&p()}))},function({swiper:e,extendParams:t,on:s,emit:i}){let r;function n(){const t=e.slides.eq(e.activeIndex);let s=e.params.autoplay.delay;t.attr("data-swiper-autoplay")&&(s=t.attr("data-swiper-autoplay")||e.params.autoplay.delay),clearTimeout(r),r=p((()=>{let t;e.params.autoplay.reverseDirection?e.params.loop?(e.loopFix(),t=e.slidePrev(e.params.speed,!0,!0),i("autoplay")):e.isBeginning?e.params.autoplay.stopOnLastSlide?o():(t=e.slideTo(e.slides.length-1,e.params.speed,!0,!0),i("autoplay")):(t=e.slidePrev(e.params.speed,!0,!0),i("autoplay")):e.params.loop?(e.loopFix(),t=e.slideNext(e.params.speed,!0,!0),i("autoplay")):e.isEnd?e.params.autoplay.stopOnLastSlide?o():(t=e.slideTo(0,e.params.speed,!0,!0),i("autoplay")):(t=e.slideNext(e.params.speed,!0,!0),i("autoplay")),(e.params.cssMode&&e.autoplay.running||!1===t)&&n()}),s)}function l(){return void 0===r&&(!e.autoplay.running&&(e.autoplay.running=!0,i("autoplayStart"),n(),!0))}function o(){return!!e.autoplay.running&&(void 0!==r&&(r&&(clearTimeout(r),r=void 0),e.autoplay.running=!1,i("autoplayStop"),!0))}function 
d(t){e.autoplay.running&&(e.autoplay.paused||(r&&clearTimeout(r),e.autoplay.paused=!0,0!==t&&e.params.autoplay.waitForTransition?["transitionend","webkitTransitionEnd"].forEach((t=>{e.$wrapperEl[0].addEventListener(t,u)})):(e.autoplay.paused=!1,n())))}function c(){const t=a();"hidden"===t.visibilityState&&e.autoplay.running&&d(),"visible"===t.visibilityState&&e.autoplay.paused&&(n(),e.autoplay.paused=!1)}function u(t){e&&!e.destroyed&&e.$wrapperEl&&t.target===e.$wrapperEl[0]&&(["transitionend","webkitTransitionEnd"].forEach((t=>{e.$wrapperEl[0].removeEventListener(t,u)})),e.autoplay.paused=!1,e.autoplay.running?n():o())}function h(){e.params.autoplay.disableOnInteraction?o():d(),["transitionend","webkitTransitionEnd"].forEach((t=>{e.$wrapperEl[0].removeEventListener(t,u)}))}function m(){e.params.autoplay.disableOnInteraction||(e.autoplay.paused=!1,n())}e.autoplay={running:!1,paused:!1},t({autoplay:{enabled:!1,delay:3e3,waitForTransition:!0,disableOnInteraction:!0,stopOnLastSlide:!1,reverseDirection:!1,pauseOnMouseEnter:!1}}),s("init",(()=>{if(e.params.autoplay.enabled){l();a().addEventListener("visibilitychange",c),e.params.autoplay.pauseOnMouseEnter&&(e.$el.on("mouseenter",h),e.$el.on("mouseleave",m))}})),s("beforeTransitionStart",((t,s,a)=>{e.autoplay.running&&(a||!e.params.autoplay.disableOnInteraction?e.autoplay.pause(s):o())})),s("sliderFirstMove",(()=>{e.autoplay.running&&(e.params.autoplay.disableOnInteraction?o():d())})),s("touchEnd",(()=>{e.params.cssMode&&e.autoplay.paused&&!e.params.autoplay.disableOnInteraction&&n()})),s("destroy",(()=>{e.$el.off("mouseenter",h),e.$el.off("mouseleave",m),e.autoplay.running&&o();a().removeEventListener("visibilitychange",c)})),Object.assign(e.autoplay,{pause:d,run:n,start:l,stop:o})},function({swiper:e,extendParams:t,on:s}){t({thumbs:{swiper:null,multipleActiveThumbs:!0,autoScrollOffset:0,slideThumbActiveClass:"swiper-slide-thumb-active",thumbsContainerClass:"swiper-thumbs"}});let a=!1,i=!1;function r(){const 
t=e.thumbs.swiper;if(!t)return;const s=t.clickedIndex,a=t.clickedSlide;if(a&&d(a).hasClass(e.params.thumbs.slideThumbActiveClass))return;if(null==s)return;let i;if(i=t.params.loop?parseInt(d(t.clickedSlide).attr("data-swiper-slide-index"),10):s,e.params.loop){let t=e.activeIndex;e.slides.eq(t).hasClass(e.params.slideDuplicateClass)&&(e.loopFix(),e._clientLeft=e.$wrapperEl[0].clientLeft,t=e.activeIndex);const s=e.slides.eq(t).prevAll(`[data-swiper-slide-index="${i}"]`).eq(0).index(),a=e.slides.eq(t).nextAll(`[data-swiper-slide-index="${i}"]`).eq(0).index();i=void 0===s?a:void 0===a?s:a-t1?a:o:a-oe.previousIndex?"next":"prev"}else n=e.realIndex,l=n>e.previousIndex?"next":"prev";r&&(n+="next"===l?i:-1*i),s.visibleSlidesIndexes&&s.visibleSlidesIndexes.indexOf(n)<0&&(s.params.centeredSlides?n=n>o?n-Math.floor(a/2)+1:n+Math.floor(a/2)-1:n>o&&s.params.slidesPerGroup,s.slideTo(n,t?0:void 0))}let n=1;const l=e.params.thumbs.slideThumbActiveClass;if(e.params.slidesPerView>1&&!e.params.centeredSlides&&(n=e.params.slidesPerView),e.params.thumbs.multipleActiveThumbs||(n=1),n=Math.floor(n),s.slides.removeClass(l),s.params.loop||s.params.virtual&&s.params.virtual.enabled)for(let t=0;t{const{thumbs:t}=e.params;t&&t.swiper&&(n(),l(!0))})),s("slideChange update resize observerUpdate",(()=>{e.thumbs.swiper&&l()})),s("setTransition",((t,s)=>{const a=e.thumbs.swiper;a&&a.setTransition(s)})),s("beforeDestroy",(()=>{const 
t=e.thumbs.swiper;t&&i&&t&&t.destroy()})),Object.assign(e.thumbs,{init:n,update:l})},function({swiper:e,extendParams:t,emit:s,once:a}){t({freeMode:{enabled:!1,momentum:!0,momentumRatio:1,momentumBounce:!0,momentumBounceRatio:1,momentumVelocityRatio:1,sticky:!1,minimumVelocity:.02}}),Object.assign(e,{freeMode:{onTouchMove:function(){const{touchEventsData:t,touches:s}=e;0===t.velocities.length&&t.velocities.push({position:s[e.isHorizontal()?"startX":"startY"],time:t.touchStartTime}),t.velocities.push({position:s[e.isHorizontal()?"currentX":"currentY"],time:u()})},onTouchEnd:function({currentPos:t}){const{params:i,$wrapperEl:r,rtlTranslate:n,snapGrid:l,touchEventsData:o}=e,d=u()-o.touchStartTime;if(t<-e.minTranslate())e.slideTo(e.activeIndex);else if(t>-e.maxTranslate())e.slides.length1){const t=o.velocities.pop(),s=o.velocities.pop(),a=t.position-s.position,r=t.time-s.time;e.velocity=a/r,e.velocity/=2,Math.abs(e.velocity)150||u()-t.time>300)&&(e.velocity=0)}else e.velocity=0;e.velocity*=i.freeMode.momentumVelocityRatio,o.velocities.length=0;let t=1e3*i.freeMode.momentumRatio;const d=e.velocity*t;let c=e.translate+d;n&&(c=-c);let p,h=!1;const m=20*Math.abs(e.velocity)*i.freeMode.momentumBounceRatio;let f;if(ce.minTranslate())i.freeMode.momentumBounce?(c-e.minTranslate()>m&&(c=e.minTranslate()+m),p=e.minTranslate(),h=!0,o.allowMomentumBounce=!0):c=e.minTranslate(),i.loop&&i.centeredSlides&&(f=!0);else if(i.freeMode.sticky){let t;for(let e=0;e-c){t=e;break}c=Math.abs(l[t]-c){e.loopFix()})),0!==e.velocity){if(t=n?Math.abs((-c-e.translate)/e.velocity):Math.abs((c-e.translate)/e.velocity),i.freeMode.sticky){const 
s=Math.abs((n?-c:c)-e.translate),a=e.slidesSizesGrid[e.activeIndex];t=s{e&&!e.destroyed&&o.allowMomentumBounce&&(s("momentumBounce"),e.setTransition(i.speed),setTimeout((()=>{e.setTranslate(p),r.transitionEnd((()=>{e&&!e.destroyed&&e.transitionEnd()}))}),0))}))):e.velocity?(s("_freeModeNoMomentumRelease"),e.updateProgress(c),e.setTransition(t),e.setTranslate(c),e.transitionStart(!0,e.swipeDirection),e.animating||(e.animating=!0,r.transitionEnd((()=>{e&&!e.destroyed&&e.transitionEnd()})))):e.updateProgress(c),e.updateActiveIndex(),e.updateSlidesClasses()}else{if(i.freeMode.sticky)return void e.slideToClosest();i.freeMode&&s("_freeModeNoMomentumRelease")}(!i.freeMode.momentum||d>=i.longSwipesMs)&&(e.updateProgress(),e.updateActiveIndex(),e.updateSlidesClasses())}}}})},function({swiper:e,extendParams:t}){let s,a,i;t({grid:{rows:1,fill:"column"}}),e.grid={initSlides:t=>{const{slidesPerView:r}=e.params,{rows:n,fill:l}=e.params.grid;a=s/n,i=Math.floor(t/n),s=Math.floor(t/n)===t/n?t:Math.ceil(t/n)*n,"auto"!==r&&"row"===l&&(s=Math.max(s,r*n))},updateSlide:(t,r,n,l)=>{const{slidesPerGroup:o,spaceBetween:d}=e.params,{rows:c,fill:p}=e.params.grid;let u,h,m;if("row"===p&&o>1){const e=Math.floor(t/(o*c)),a=t-c*o*e,i=0===e?o:Math.min(Math.ceil((n-e*c*o)/c),o);m=Math.floor(a/i),h=a-m*i+e*o,u=h+m*s/c,r.css({"-webkit-order":u,order:u})}else"column"===p?(h=Math.floor(t/c),m=t-h*c,(h>i||h===i&&m===c-1)&&(m+=1,m>=c&&(m=0,h+=1))):(m=Math.floor(t/a),h=t-m*a);r.css(l("margin-top"),0!==m?d&&`${d}px`:"")},updateWrapperSize:(t,a,i)=>{const{spaceBetween:r,centeredSlides:n,roundLengths:l}=e.params,{rows:o}=e.params.grid;if(e.virtualSize=(t+r)*s,e.virtualSize=Math.ceil(e.virtualSize/o)-r,e.$wrapperEl.css({[i("width")]:`${e.virtualSize+r}px`}),n){a.splice(0,a.length);const t=[];for(let s=0;s{const{slides:t}=e,s=e.params.fadeEffect;for(let 
a=0;a{const{transformEl:s}=e.params.fadeEffect;(s?e.slides.find(s):e.slides).transition(t),K({swiper:e,duration:t,transformEl:s,allSlides:!0})},overwriteParams:()=>({slidesPerView:1,slidesPerGroup:1,watchSlidesProgress:!0,spaceBetween:0,virtualTranslate:!e.params.cssMode})})},function({swiper:e,extendParams:t,on:s}){t({cubeEffect:{slideShadows:!0,shadow:!0,shadowOffset:20,shadowScale:.94}}),F({effect:"cube",swiper:e,on:s,setTranslate:()=>{const{$el:t,$wrapperEl:s,slides:a,width:i,height:r,rtlTranslate:n,size:l,browser:o}=e,c=e.params.cubeEffect,p=e.isHorizontal(),u=e.virtual&&e.params.virtual.enabled;let h,m=0;c.shadow&&(p?(h=s.find(".swiper-cube-shadow"),0===h.length&&(h=d('
    '),s.append(h)),h.css({height:`${i}px`})):(h=t.find(".swiper-cube-shadow"),0===h.length&&(h=d('
    '),t.append(h))));for(let e=0;e-1&&(m=90*s+90*o,n&&(m=90*-s-90*o)),t.transform(v),c.slideShadows){let e=p?t.find(".swiper-slide-shadow-left"):t.find(".swiper-slide-shadow-top"),s=p?t.find(".swiper-slide-shadow-right"):t.find(".swiper-slide-shadow-bottom");0===e.length&&(e=d(`
    `),t.append(e)),0===s.length&&(s=d(`
    `),t.append(s)),e.length&&(e[0].style.opacity=Math.max(-o,0)),s.length&&(s[0].style.opacity=Math.max(o,0))}}if(s.css({"-webkit-transform-origin":`50% 50% -${l/2}px`,"transform-origin":`50% 50% -${l/2}px`}),c.shadow)if(p)h.transform(`translate3d(0px, ${i/2+c.shadowOffset}px, ${-i/2}px) rotateX(90deg) rotateZ(0deg) scale(${c.shadowScale})`);else{const e=Math.abs(m)-90*Math.floor(Math.abs(m)/90),t=1.5-(Math.sin(2*e*Math.PI/360)/2+Math.cos(2*e*Math.PI/360)/2),s=c.shadowScale,a=c.shadowScale/t,i=c.shadowOffset;h.transform(`scale3d(${s}, 1, ${a}) translate3d(0px, ${r/2+i}px, ${-r/2/a}px) rotateX(-90deg)`)}const f=o.isSafari||o.isWebView?-l/2:0;s.transform(`translate3d(0px,0,${f}px) rotateX(${e.isHorizontal()?0:m}deg) rotateY(${e.isHorizontal()?-m:0}deg)`)},setTransition:t=>{const{$el:s,slides:a}=e;a.transition(t).find(".swiper-slide-shadow-top, .swiper-slide-shadow-right, .swiper-slide-shadow-bottom, .swiper-slide-shadow-left").transition(t),e.params.cubeEffect.shadow&&!e.isHorizontal()&&s.find(".swiper-cube-shadow").transition(t)},perspective:()=>!0,overwriteParams:()=>({slidesPerView:1,slidesPerGroup:1,watchSlidesProgress:!0,resistanceRatio:0,spaceBetween:0,centeredSlides:!1,virtualTranslate:!0})})},function({swiper:e,extendParams:t,on:s}){t({flipEffect:{slideShadows:!0,limitRotation:!0,transformEl:null}}),F({effect:"flip",swiper:e,on:s,setTranslate:()=>{const{slides:t,rtlTranslate:s}=e,a=e.params.flipEffect;for(let i=0;i{const{transformEl:s}=e.params.flipEffect;(s?e.slides.find(s):e.slides).transition(t).find(".swiper-slide-shadow-top, .swiper-slide-shadow-right, .swiper-slide-shadow-bottom, 
.swiper-slide-shadow-left").transition(t),K({swiper:e,duration:t,transformEl:s})},perspective:()=>!0,overwriteParams:()=>({slidesPerView:1,slidesPerGroup:1,watchSlidesProgress:!0,spaceBetween:0,virtualTranslate:!e.params.cssMode})})},function({swiper:e,extendParams:t,on:s}){t({coverflowEffect:{rotate:50,stretch:0,depth:100,scale:1,modifier:1,slideShadows:!0,transformEl:null}}),F({effect:"coverflow",swiper:e,on:s,setTranslate:()=>{const{width:t,height:s,slides:a,slidesSizesGrid:i}=e,r=e.params.coverflowEffect,n=e.isHorizontal(),l=e.translate,o=n?t/2-l:s/2-l,d=n?r.rotate:-r.rotate,c=r.depth;for(let e=0,t=a.length;e0?l:0),s.length&&(s[0].style.opacity=-l>0?-l:0)}}},setTransition:t=>{const{transformEl:s}=e.params.coverflowEffect;(s?e.slides.find(s):e.slides).transition(t).find(".swiper-slide-shadow-top, .swiper-slide-shadow-right, .swiper-slide-shadow-bottom, .swiper-slide-shadow-left").transition(t)},perspective:()=>!0,overwriteParams:()=>({watchSlidesProgress:!0})})},function({swiper:e,extendParams:t,on:s}){t({creativeEffect:{transformEl:null,limitProgress:1,shadowPerProgress:!1,progressMultiplier:1,perspective:!0,prev:{translate:[0,0,0],rotate:[0,0,0],opacity:1,scale:1},next:{translate:[0,0,0],rotate:[0,0,0],opacity:1,scale:1}}});const a=e=>"string"==typeof e?e:`${e}px`;F({effect:"creative",swiper:e,on:s,setTranslate:()=>{const{slides:t,$wrapperEl:s,slidesSizesGrid:i}=e,r=e.params.creativeEffect,{progressMultiplier:n}=r,l=e.params.centeredSlides;if(l){const t=i[0]/2-e.params.slidesOffsetBefore||0;s.transform(`translateX(calc(50% - ${t}px))`)}for(let s=0;s0&&(f=r.prev,m=!0),u.forEach(((e,t)=>{u[t]=`calc(${e}px + (${a(f.translate[t])} * ${Math.abs(d*n)}))`})),h.forEach(((e,t)=>{h[t]=f.rotate[t]*Math.abs(d*n)})),i[0].style.zIndex=-Math.abs(Math.round(o))+t.length;const g=u.join(", "),v=`rotateX(${h[0]}deg) rotateY(${h[1]}deg) 
rotateZ(${h[2]}deg)`,w=c<0?`scale(${1+(1-f.scale)*c*n})`:`scale(${1-(1-f.scale)*c*n})`,b=c<0?1+(1-f.opacity)*c*n:1-(1-f.opacity)*c*n,x=`translate3d(${g}) ${v} ${w}`;if(m&&f.shadow||!m){let e=i.children(".swiper-slide-shadow");if(0===e.length&&f.shadow&&(e=Z(r,i)),e.length){const t=r.shadowPerProgress?d*(1/r.limitProgress):d;e[0].style.opacity=Math.min(Math.max(Math.abs(t),0),1)}}const y=U(r,i);y.transform(x).css({opacity:b}),f.origin&&y.css("transform-origin",f.origin)}},setTransition:t=>{const{transformEl:s}=e.params.creativeEffect;(s?e.slides.find(s):e.slides).transition(t).find(".swiper-slide-shadow").transition(t),K({swiper:e,duration:t,transformEl:s,allSlides:!0})},perspective:()=>e.params.creativeEffect.perspective,overwriteParams:()=>({watchSlidesProgress:!0,virtualTranslate:!e.params.cssMode})})},function({swiper:e,extendParams:t,on:s}){t({cardsEffect:{slideShadows:!0,transformEl:null}}),F({effect:"cards",swiper:e,on:s,setTranslate:()=>{const{slides:t,activeIndex:s}=e,a=e.params.cardsEffect,{startTranslate:i,isTouched:r}=e.touchEventsData,n=e.translate;for(let l=0;l0&&c<1&&(r||e.params.cssMode)&&n-1&&(r||e.params.cssMode)&&n>i;if(w||b){const e=(1-Math.abs((Math.abs(c)-.5)/.5))**.5;g+=-28*c*e,f+=-.5*e,v+=96*e,h=-25*e*Math.abs(c)+"%"}if(u=c<0?`calc(${u}px + (${v*Math.abs(c)}%))`:c>0?`calc(${u}px + (-${v*Math.abs(c)}%))`:`${u}px`,!e.isHorizontal()){const e=h;h=u,u=e}const x=`\n translate3d(${u}, ${h}, ${m}px)\n rotateZ(${g}deg)\n scale(${c<0?""+(1+(1-f)*c):""+(1-(1-f)*c)})\n `;if(a.slideShadows){let 
e=o.find(".swiper-slide-shadow");0===e.length&&(e=Z(a,o)),e.length&&(e[0].style.opacity=Math.min(Math.max((Math.abs(c)-.5)/.5,0),1))}o[0].style.zIndex=-Math.abs(Math.round(d))+t.length;U(a,o).transform(x)}},setTransition:t=>{const{transformEl:s}=e.params.cardsEffect;(s?e.slides.find(s):e.slides).transition(t).find(".swiper-slide-shadow").transition(t),K({swiper:e,duration:t,transformEl:s})},perspective:()=>!0,overwriteParams:()=>({watchSlidesProgress:!0,virtualTranslate:!e.params.cssMode})})}];return H.use(J),H})); +//# sourceMappingURL=swiper-bundle.min.js.map \ No newline at end of file