fix conflicts

Signed-off-by: Jianna Liu <lindsayliu@yunify.com>
Jianna Liu 2021-02-24 17:17:40 +08:00
commit c5dd2a7480
474 changed files with 5527 additions and 1634 deletions


@ -0,0 +1,100 @@
@import 'variables';
@import 'mixin';
@import 'markdown';
.md-body {
font-size: 16px;
line-height: 2.29;
color: #36435c;
}
.md-body h2 {
font-weight: 500;
line-height: 64px;
color: #171c34;
text-shadow: none;
text-align: left;
margin-bottom: 20px;
border-bottom: 1px solid #ccd3db;
}
.md-body h3 {
font-weight: 600;
line-height: 1.5;
color: #171c34;
}
.md-body img {
max-width: 100%;
box-sizing: content-box;
background-color: #fff;
border-radius: 5px;
box-shadow: none;
}
.md-body blockquote {
padding: 4px 20px 4px 12px;
border-radius: 4px;
background-color: #ecf0f2;
}
header.navigation {
box-shadow: 0 4px 8px 0 rgba(36,46,66,.06), 0 8px 16px 0 rgba(36,46,66,.05);
background-image: linear-gradient(to bottom,rgba(134,219,162,0.9),rgba(0,170,114,0.9));
}
.section-1 {
padding-top: 124px;
& > div {
box-sizing: border-box;
width: 880px;
margin: 0 auto;
padding: 40px 60px 76px;
background-color: #ffffff;
@media only screen and (max-width: $mobile-max-width) {
width: 100%;
padding: 20px;
}
.video-div {
position: relative;
margin-bottom: 24px;
video {
width: 100%;
outline: none;
}
iframe {
width: 100%;
height: 400px;
@media only screen and (max-width: $mobile-max-width) {
height: 200px;
}
}
}
.time-div {
img, span {
vertical-align: middle;
font-size: 14px;
line-height: 1.43;
color: #919aa3;
}
span + img {
margin-left: 20px;
}
}
h1 {
margin-top: 8px;
text-shadow: 0 8px 16px rgba(35, 45, 65, 0.1);
font-size: 40px;
line-height: 1.4;
color: #171c34;
}
}
}

assets/scss/live.scss

@ -0,0 +1,480 @@
@import 'variables';
@import 'mixin';
.btn-a {
display: inline-block;
padding: 0 53px;
height: 56px;
border-radius: 28px;
line-height: 56px;
font-size: 16px;
color: #ffffff;
box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2), 0 10px 50px 0 rgba(34, 43, 62, 0.1);
background-image: linear-gradient(to bottom, rgba(85, 188, 138, 0), rgba(85, 188, 138, 0.1) 97%), linear-gradient(to bottom, #55bc8a, #55bc8a);
&:hover {
box-shadow: none;
}
}
.section-1 {
position: relative;
padding-top: 124px;
padding-bottom: 136px;
}
.section-2 {
position: relative;
margin-top: -94px;
.common-layout {
display: flex;
border-radius: 8px;
box-shadow: 0 4px 8px 0 rgba(36, 46, 66, 0.06), 0 8px 16px 0 rgba(36, 46, 66, 0.05);
overflow: hidden;
background-color: #ffffff;
@media only screen and (max-width: $mobile-max-width) {
flex-direction: column;
border-radius: 0;
}
.live-2-left {
width: 840px;
height: 400px;
@media only screen and (max-width: $mobile-max-width) {
width: 100%;
height: auto;
}
img {
width: 100%;
height: 100%;
min-height: 200px;
object-fit: cover;
}
}
.live-2-right {
box-sizing: border-box;
width: 320px;
height: 400px;
padding: 10px;
@media only screen and (max-width: $mobile-max-width) {
max-width: 320px;
width: auto;
height: auto;
margin: 0 auto;
}
h2 {
margin-bottom: 10px;
font-size: 18px;
line-height: 1.78;
letter-spacing: -0.05px;
color: #3d3e49;
@include block-ellipsis(2);
}
.tag {
position: absolute;
top: 4px;
right: 0;
font-size: 12px;
padding: 4px 10px;
line-height: 12px;
border-top-left-radius: 10px;
border-bottom-left-radius: 10px;
}
.shadow {
position: relative;
padding: 20px 24px;
border-radius: 8px;
box-shadow: 0 4px 8px 0 rgba(36, 46, 66, 0.06), 0 8px 16px 0 rgba(36, 46, 66, 0.05);
}
.notice {
box-sizing: border-box;
height: 250px;
p {
margin-bottom: 6px;
height: 28px;
font-size: 16px;
line-height: 28px;
letter-spacing: -0.04px;
color: #919aa3;
img {
vertical-align: middle;
margin-right: 4px;
}
}
a {
margin: 34px auto 0;
height: 40px;
padding: 0 28px;
line-height: 40px;
}
.tag {
color: #3d3e49;
background-color: #f7b500;
}
}
.over {
box-sizing: border-box;
height: 120px;
margin-top: 10px;
a {
color: #3d3e49;
}
.tag {
color: #ffffff;
background-color: #919aa3;
}
}
}
}
}
.section-3 {
background-color: #fff;
padding-top: 30px;
padding-bottom: 60px;
@media only screen and (max-width: $mobile-max-width) {
padding-top: 20px;
padding-bottom: 40px;
}
& > div {
& > .video-tab-ul {
padding: 0 34px;
border-radius: 5px;
box-shadow: 0 4px 16px 0 rgba(7,42,68,.1);
background-color: #fff;
li {
display: inline-block;
min-width: 48px;
padding: 8px 16px;
margin: 10px 0;
border-radius: 20px;
-webkit-transition: all .2s ease-in-out;
transition: all .2s ease-in-out;
cursor: pointer;
font-size: 16px;
font-weight: 500;
line-height: 1.5;
text-align: center;
&:hover {
box-shadow: 0 8px 16px 0 rgba(101,193,148,.2),0 0 50px 0 rgba(101,193,148,.1);
background-color: #55bc8a;
color: #fff;
}
}
.active {
box-shadow: 0 8px 16px 0 rgba(101,193,148,.2),0 0 50px 0 rgba(101,193,148,.1);
background-color: #55bc8a;
color: #fff;
}
li + li {
margin-left: 12px;
}
}
.video-ul {
margin-top: 20px;
font-size: 0;
@media only screen and (max-width: $mobile-max-width) {
text-align: center;
}
& > li {
position: relative;
display: inline-block;
width: 360px;
height: 202px;
border-radius: 4px;
margin-bottom: 40px;
margin-right: 40px;
background-color: #eef4f8;
background-image: url('/images/videos/video-bg.svg');
overflow: hidden;
text-align: left;
cursor: pointer;
& > img {
width: 100%;
height: 100%;
}
&:hover {
& > div {
height: 202px;
}
}
& > div {
position: absolute;
left: 0;
right: 0;
bottom: 0;
height: 70px;
opacity: 0.89;
background-color: #171c34;
transition: all .2s ease-in-out;
overflow: hidden;
& > .btn {
position: absolute;
left: 50%;
bottom: 120px;
transform: translateX(-50%);
}
& > div {
position: absolute;
left: 0;
right: 0;
bottom: 0;
height: 70px;
padding: 0 16px;
h3 {
font-size: 16px;
font-weight: 500;
line-height: 22px;
color: #fff;
padding: 8px 0;
margin-bottom: 6px;
border-bottom: 1px solid hsla(0,0%,100%,.1);
text-overflow: ellipsis;
white-space: nowrap;
overflow: hidden;
}
p {
font-size: 14px;
line-height: 20px;
color: #919aa3;
}
.time {
position: absolute;
right: 16px;
bottom: 6px;
}
}
}
@media only screen and (max-width: $mobile-max-width) {
width: 330px;
height: 190px;
display: block;
margin: 30px auto;
}
}
li:nth-child(3n) {
margin-right: 0;
@media only screen and (max-width: $mobile-max-width) {
margin: 30px auto;
}
}
}
& > div {
margin-top: 20px;
text-align: center;
li {
display: inline-block;
margin: 0 10px;
}
.disabled {
a {
color: #ccd3db;
}
}
.active {
a {
color: #55bc8a;
}
}
}
}
}
.jquery-modal {
z-index: 10;
background-color: rgba(23, 28, 52, 0.5);
}
#modal-for-video {
width: 750px;
max-width: 750px;
height: 420px;
padding: 0;
border-radius: 0;
font-size: 0;
.video-div {
height: 100%;
}
video {
width: 100%;
height: 100%;
background-color: #171c34;
outline: none;
}
a {
top: -40px;
right: 0;
}
@media only screen and (max-width: $mobile-max-width) {
width: 100%;
max-width: 100%;
height: auto;
iframe {
width: 100%;
height: 300px;
}
}
}
.section-4 {
background-image: linear-gradient(113deg, #4a499a 27%, #8552c3 81%);
.common-layout {
white-space: nowrap;
overflow: auto;
& > div {
box-sizing: border-box;
display: inline-block;
vertical-align: top;
white-space: normal;
width: 140px;
height: 225px;
margin: 80px 40px;
padding-top: 20px;
border-top: 1px solid #a1b3c4;
.time-div {
display: flex;
.right {
margin-left: 4px;
font-weight: bold;
line-height: 1;
color: #ffffff;
.date {
margin-bottom: 4px;
font-size: 24px;
}
.time {
font-size: 14px;
}
}
}
h3 {
height: 60px;
margin: 21px 0 47px;
font-size: 14px;
font-weight: 500;
line-height: 1.43;
color: #d5dee7;
a {
color: #d5dee7;
&:hover {
color: #008a5c;
}
}
}
button {
font-size: 12px;
font-weight: 600;
line-height: 2;
border: none;
padding: 5px 28px;
border-radius: 17px;
cursor: pointer;
box-shadow: 0 10px 50px 0 rgba(34, 43, 62, 0.1), 0 8px 16px 0 rgba(33, 43, 61, 0.2);
&:hover {
box-shadow: none;
}
}
.over-btn {
color: #ffffff;
background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #242e42, #242e42);
}
.notive-btn {
color: #3d3e49;
background-image: linear-gradient(to bottom, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.1) 97%), linear-gradient(to bottom, #ffffff, #ffffff);
}
}
}
}
.section-5 {
.common-layout {
position: relative;
padding-top: 100px;
padding-left: 60px;
padding-bottom: 30px;
@media only screen and (max-width: $mobile-max-width) {
padding-left: 20px;
}
.left-div {
position: relative;
width: 600px;
@media only screen and (max-width: $mobile-max-width) {
width: 100%;
z-index: 2;
}
h2 {
font-size: 32px;
font-weight: 600;
line-height: 1.63;
color: #171c34;
text-shadow: 0 4px 8px rgba(35, 45, 65, 0.1);
}
p {
margin-top: 13px;
font-size: 16px;
line-height: 1.5;
letter-spacing: -0.04px;
color: #31383e;
}
a {
margin-top: 18px;
}
}
& > img {
position: absolute;
top: 88px;
right: 0;
@media only screen and (max-width: $mobile-max-width) {
opacity: 0.3;
}
}
}
}


@ -55,3 +55,128 @@
align-items: center;
}
@mixin TypographyTitle($fontWeight: $font-bold) {
font-family: $font-family-id;
font-style: normal;
font-stretch: normal;
letter-spacing: normal;
font-weight: $fontWeight;
}
@mixin TypographyTitleH1($color: $dark-color06) {
font-size: 40px;
line-height: 1.4;
@include TypographyTitle();
color: $color;
}
@mixin TypographyTitleH2($color: $dark-color06) {
font-size: 32px;
line-height: 1.38;
@include TypographyTitle();
color: $color;
}
@mixin TypographyTitleH3($color: $dark-color06) {
font-size: 24px;
line-height: 1.33;
@include TypographyTitle();
color: $color;
}
@mixin TypographyTitleH4($color: $dark-color06) {
font-size: 20px;
line-height: 1.4;
@include TypographyTitle();
color: $color;
}
@mixin TypographyTitleH5($color: $dark-color06) {
font-size: 14px;
line-height: 1.43;
@include TypographyTitle(bold);
color: $color;
}
@mixin TypographyTitleH6($color: $dark-color06) {
font-size: $size-small;
line-height: 1.67;
@include TypographyTitle(bold);
color: $color;
}
@mixin TypographySymbolText($color: $dark-color06) {
font-family: $font-family;
font-size: $size-small;
font-weight: $font-bold;
font-style: normal;
font-stretch: normal;
line-height: 1.67;
letter-spacing: normal;
color: $color;
}
@mixin TypographyParagraphCode($color: #363e4a) {
font-family: Monaco;
font-size: $size-small;
font-weight: normal;
font-style: normal;
font-stretch: normal;
line-height: 2;
letter-spacing: normal;
color: $color;
}
@mixin TypographyParagraph($color: $dark-color07) {
font-family: $font-family-id;
font-size: $size-small;
font-weight: normal;
font-style: normal;
font-stretch: normal;
line-height: 1.67;
letter-spacing: normal;
color: $color;
}
@mixin common-flex {
display: flex;
flex-wrap: wrap;
justify-content: space-between;
}
@mixin absolute-center {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
}
@mixin product-container {
width: 277px;
height: 172px;
padding: 12px;
border-radius: 3px;
border: solid 1px $light-color06;
cursor: pointer;
&:hover {
box-shadow: 0 4px 8px 0 rgba(36, 46, 66, 0.2);
border: solid 1px $dark-color01;
}
}
@mixin common-layout {
position: relative;
width: 1140px;
margin: 0 auto;
padding-left: 260px;
@media only screen and (max-width: $width-01) {
width: 100%;
}
@media only screen and (max-width: $width-02) {
padding: 10px;
padding-top: 20px;
}
}


@ -22,9 +22,9 @@ Here is my node information of the existing Kubernetes cluster.
| Host IP | Host Name | Role | System |
| ---------- | --------- | ------------ | ----------------------------------------- |
| 172.16.0.2 | master1 | master, etcd | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.3 | worker1 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.4 | worker2 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.2 | master1 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.3 | worker1 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.4 | worker2 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
```bash
$ kubectl get nodes
@ -38,9 +38,9 @@ Here are the nodes that will be added to the cluster to achieve high availabilit
| Host IP | Host Name | Role | System |
| ---------- | --------- | ------------ | ----------------------------------------- |
| 172.16.0.5 | master2 | master, etcd | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.6 | master3 | master, etcd | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.7 | worker3 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 172.16.0.5 | master2 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.6 | master3 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.7 | worker3 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
![node-info](https://ap3.qingstor.com/kubesphere-website/docs/ha-architecture-node-info.jpg)


@ -30,9 +30,9 @@ I am going to create a cluster with three nodes on cloud. Here is my machine con
| Host IP | Host Name | Role | System |
| ----------- | --------- | ------------ | ----------------------------------------- |
| 192.168.0.2 | master | master, etcd | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.3 | worker1 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.4 | worker2 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.2 | master | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 192.168.0.3 | worker1 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 192.168.0.4 | worker2 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
{{< notice note >}}


@ -23,9 +23,9 @@ Here is my node information of the existing Kubernetes cluster.
| Host IP | Host Name | Role | System |
| ----------- | --------- | ------------ | ----------------------------------------- |
| 192.168.0.2 | master | master, etcd | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.3 | worker1 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.4 | worker2 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.2 | master | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 192.168.0.3 | worker1 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 192.168.0.4 | worker2 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
```bash
$ kubectl get nodes
@ -39,7 +39,7 @@ Here is the node that will be added to the cluster first and then removed from t
| Host IP | Host Name | Role | System |
| ----------- | --------- | ------ | ----------------------------------------- |
| 192.168.0.5 | worker3 | worker | CentOS 7.5, 8 Core, 8 G Memory, 50 G Disk |
| 192.168.0.5 | worker3 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
For more information about requirements for nodes, network, and dependencies, [see my last post](https://kubesphere.io/blogs/install-kubernetes-using-kubekey/#node-requirements).


@ -0,0 +1,398 @@
---
title: 'Create a Highly Available Kubernetes Cluster Using Keepalived and HAproxy'
keywords: Kubernetes, Keepalived, HAproxy, KubeKey, HA
description: Use Keepalived and HAproxy to create an HA cluster.
tag: 'Kubernetes, KubeKey, installation, installer, HA'
createTime: '2021-01-27'
author: 'Pixiake, Sherlock'
snapshot: 'https://ap3.qingstor.com/kubesphere-website/docs/architecture-ha-k8s-cluster.png'
---
A highly available Kubernetes cluster ensures that your applications run without outages, which is required for production. In this regard, there are plenty of ways to achieve high availability. For example, if your cluster is deployed on the cloud (e.g. Google Cloud or AWS), you can create load balancers on these platforms directly. Alternatively, Keepalived, HAproxy, and NGINX are also possible choices for load balancing.
In this article, I am going to use Keepalived and HAproxy for load balancing to achieve high availability. The steps are listed below:
1. Prepare hosts.
2. Configure Keepalived and HAproxy.
3. Use KubeKey to set up a Kubernetes cluster.
## Cluster Architecture
In my cluster, I will set up three master nodes, three worker nodes, two nodes for load balancing, and one virtual IP address. The virtual IP address in this example may also be called a "floating IP address", meaning that in the event of node failures, the IP address can be passed between nodes, allowing for failover and thus high availability.
![architecture](https://ap3.qingstor.com/kubesphere-website/docs/architecture-ha-k8s-cluster.png)
Notice that in my cluster, I am not going to install Keepalived and HAproxy on any of the master nodes. Admittedly, you can do that and still achieve high availability. That said, I would like to try a different way by configuring two dedicated nodes for load balancing (you can add more nodes of this kind as needed). Only Keepalived and HAproxy will be installed on these two nodes, avoiding any potential conflicts with Kubernetes components and services.
## Host Information
Here is the detailed information of each node in my cluster for your reference:
| IP Address | Host Name | Role | System |
| ----------- | --------- | -------------------- | ------------------------------------------ |
| 172.16.0.2 | lb1 | Keepalived & HAproxy | CentOS 7.5, 4 Cores, 4 G Memory, 20 G Disk |
| 172.16.0.3 | lb2 | Keepalived & HAproxy | CentOS 7.5, 4 Cores, 4 G Memory, 20 G Disk |
| 172.16.0.4 | master1 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.5 | master2 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.6 | master3 | master, etcd | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.7 | worker1 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.8 | worker2 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.9 | worker3 | worker | CentOS 7.5, 8 Cores, 8 G Memory, 50 G Disk |
| 172.16.0.10 | | Virtual IP address | |
For more information about requirements for nodes, network, and dependencies, [see one of my previous posts](https://kubesphere.io/blogs/install-kubernetes-using-kubekey/#node-requirements).
## Configure Load Balancing
[Keepalived](https://www.keepalived.org/) provides a VRRP implementation and allows you to configure Linux machines for load balancing, preventing single points of failure. [HAProxy](http://www.haproxy.org/), which provides reliable, high-performance load balancing, works perfectly with Keepalived.
As mentioned above, I will install both Keepalived and HAproxy on `lb1` and `lb2`. The logic is very simple: if one of the nodes goes down, the virtual IP address (i.e. the floating IP address) will be automatically associated with the other node so that the cluster continues to function, thus achieving high availability. If you want, you can add more nodes, all with Keepalived and HAproxy installed, for that purpose.
Run the following command to install Keepalived and HAproxy first.
```bash
yum install keepalived haproxy psmisc -y
```
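You can quickly confirm that both packages were installed before moving on; a small sketch:
```bash
haproxy -v             # prints the installed HAproxy version
keepalived --version   # prints the installed Keepalived version
```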
### HAproxy
1. The configuration of HAproxy is exactly the same on the two machines for load balancing. Run the following command to configure HAproxy.
```bash
vi /etc/haproxy/haproxy.cfg
```
2. Here is my configuration for your reference (Pay attention to the `server` field. Note that `6443` is the `apiserver` port):
```bash
global
    log /dev/log local0 warning
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    log global
    option httplog
    option dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 172.16.0.4:6443 check # Replace the IP address with your own.
    server kube-apiserver-2 172.16.0.5:6443 check # Replace the IP address with your own.
    server kube-apiserver-3 172.16.0.6:6443 check # Replace the IP address with your own.
```
3. Save the file and run the following command to restart HAproxy.
```bash
systemctl restart haproxy
```
4. Make it persist through reboots:
```bash
systemctl enable haproxy
```
5. Make sure you configure HAproxy on the other machine (`lb2`) as well.
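Since the HAproxy configuration is identical on both load-balancing machines, one way is to copy the file over and restart the service there; a minimal sketch, assuming root SSH access and that `lb2` resolves to 172.16.0.3:
```bash
# Copy the HAproxy configuration from lb1 to lb2, then restart and enable the service on lb2.
scp /etc/haproxy/haproxy.cfg root@lb2:/etc/haproxy/haproxy.cfg
ssh root@lb2 "systemctl restart haproxy && systemctl enable haproxy"
```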
### Keepalived
Keepalived must be installed on both machines, while their configurations are slightly different.
1. Run the following command to configure Keepalived.
```bash
vi /etc/keepalived/keepalived.conf
```
2. Here is my configuration (`lb1`) for your reference:
```bash
global_defs {
    notification_email {
    }
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance haproxy-vip {
    state BACKUP
    priority 100
    interface eth0                 # Network card
    virtual_router_id 60
    advert_int 1

    authentication {
        auth_type PASS
        auth_pass 1111
    }

    unicast_src_ip 172.16.0.2      # The IP address of this machine
    unicast_peer {
        172.16.0.3                 # The IP address of peer machines
    }

    virtual_ipaddress {
        172.16.0.10/24             # The VIP address
    }

    track_script {
        chk_haproxy
    }
}
```
{{< notice note >}}
- For the `interface` field, you must provide your own network card information. You can run `ifconfig` on your machine to get the value.
- The IP address provided for `unicast_src_ip` is the IP address of your current machine. For other machines where HAproxy and Keepalived are also installed for load balancing, their IP addresses must be entered in the field `unicast_peer`.
{{</ notice >}}
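The `chk_haproxy` script above relies on `killall -0 haproxy`, which sends signal 0 to test whether an HAproxy process exists without affecting it. You can run the same check manually to see what Keepalived evaluates; a quick sketch:
```bash
# Exit code 0 means an haproxy process is running; a non-zero code means the check fails,
# so this node no longer receives the priority bonus and a healthy peer can take over the VIP.
killall -0 haproxy
echo "haproxy check exit code: $?"
```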
3. Save the file and run the following command to restart Keepalived.
```bash
systemctl restart keepalived
```
4. Make it persist through reboots:
```bash
systemctl enable keepalived
```
5. Make sure you configure Keepalived on the other machine (`lb2`) as well.
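As a reference for `lb2`, only the unicast addresses need to be swapped (and the `interface` value adjusted if the network card name differs); a sketch of the differing fields:
```bash
unicast_src_ip 172.16.0.3      # The IP address of lb2 itself
unicast_peer {
    172.16.0.2                 # The IP address of lb1
}
```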
## Verify HA
Before you start to create your Kubernetes cluster, make sure you have tested high availability.
1. On the machine `lb1`, run the following command:
```bash
[root@lb1 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff
inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 73334sec preferred_lft 73334sec
inet 172.16.0.10/24 scope global secondary eth0 # The VIP address
valid_lft forever preferred_lft forever
inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
2. As you can see above, the virtual IP address is successfully added. Simulate a failure on this node:
```bash
systemctl stop haproxy
```
3. Check the floating IP address again, and you can see that it has disappeared from `lb1`.
```bash
[root@lb1 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff
inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 72802sec preferred_lft 72802sec
inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
4. If the configuration is successful, the virtual IP address will fail over to the other machine (`lb2`). On `lb2`, run the following command; here is the expected output:
```bash
[root@lb2 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:3f:51:ba brd ff:ff:ff:ff:ff:ff
inet 172.16.0.3/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 72690sec preferred_lft 72690sec
inet 172.16.0.10/24 scope global secondary eth0 # The VIP address
valid_lft forever preferred_lft forever
inet6 fe80::f67c:bd4f:d6d5:1d9b/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
5. As you can see above, high availability is successfully configured.
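Before moving on, you may want to bring HAproxy on `lb1` back up so that both load balancers are healthy again; a minimal sketch:
```bash
# Undo the simulated failure on lb1.
systemctl start haproxy
ip a s eth0 | grep 172.16.0.10   # check whether the VIP has moved back
```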
## Use KubeKey to Create a Kubernetes Cluster
[KubeKey](https://github.com/kubesphere/kubekey) is an efficient and convenient tool to create a Kubernetes cluster. If you are not familiar with KubeKey, have a look at my previous articles about using KubeKey to [create a three-node cluster](https://kubesphere.io/blogs/install-kubernetes-using-kubekey/) and scale your cluster.
1. Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command to download KubeKey version 1.0.1. You only need to download KubeKey to one of your machines (e.g. `master1`) that serves as the **taskbox** for installation.
```bash
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
```
2. The above command downloads KubeKey and extracts it. Your folder now contains a file called `kk`. Make it executable:
```bash
chmod +x kk
```
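Optionally, confirm that the binary works; a quick check (assuming the `version` subcommand is available in this release):
```bash
./kk version
```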
3. Create a configuration file to specify cluster information. The Kubernetes version I am going to install is `v1.17.9`.
```bash
./kk create config --with-kubernetes v1.17.9
```
4. A default file `config-sample.yaml` will be created. Edit the file; here is my configuration for your reference:
```yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: master1, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123}
  - {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123}
  - {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123}
  - {name: worker1, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123}
  - {name: worker2, address: 172.16.0.8, internalAddress: 172.16.0.8, user: root, password: Testing123}
  - {name: worker3, address: 172.16.0.9, internalAddress: 172.16.0.9, user: root, password: Testing123}
  roleGroups:
    etcd:
    - master1
    - master2
    - master3
    master:
    - master1
    - master2
    - master3
    worker:
    - worker1
    - worker2
    - worker3
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: 172.16.0.10   # The VIP address
    port: 6443
  kubernetes:
    version: v1.17.9
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    registryMirrors: []
    insecureRegistries: []
  addons: []
```
{{< notice note >}}
- Replace the value of `controlPlaneEndpoint.address` with your own VIP address.
- For more information about different parameters in this configuration file, see [one of my previous blogs](https://kubesphere.io/blogs/install-kubernetes-using-kubekey/#install-kubernetes).
{{</ notice >}}
5. Save the file and execute the following command to create your cluster:
```bash
./kk create cluster -f config-sample.yaml
```
6. You can see the following output when the installation finishes.
```bash
Congratulations! Installation is successful.
```
7. Execute the following command to check the status of Pods in all namespaces.
```bash
kubectl get pod --all-namespaces
```
```bash
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-59d85c5c84-l7zp5 1/1 Running 0 42s
kube-system calico-node-5d6gb 1/1 Running 0 21s
kube-system calico-node-77bcj 1/1 Running 0 42s
kube-system calico-node-bdzfp 1/1 Running 0 21s
kube-system calico-node-ph756 1/1 Running 0 22s
kube-system calico-node-phz7d 1/1 Running 0 22s
kube-system calico-node-v7wnf 1/1 Running 0 22s
kube-system coredns-74d59cc5c6-gdkmz 1/1 Running 0 53s
kube-system coredns-74d59cc5c6-j2lhc 1/1 Running 0 53s
kube-system kube-apiserver-master1 1/1 Running 0 48s
kube-system kube-apiserver-master2 1/1 Running 0 19s
kube-system kube-apiserver-master3 1/1 Running 0 19s
kube-system kube-controller-manager-master1 1/1 Running 0 48s
kube-system kube-controller-manager-master2 1/1 Running 0 19s
kube-system kube-controller-manager-master3 1/1 Running 0 19s
kube-system kube-proxy-29sfc 1/1 Running 0 21s
kube-system kube-proxy-drzsc 1/1 Running 0 22s
kube-system kube-proxy-lgwhd 1/1 Running 0 22s
kube-system kube-proxy-npq6t 1/1 Running 0 21s
kube-system kube-proxy-srlwx 1/1 Running 0 22s
kube-system kube-proxy-vdtbk 1/1 Running 0 53s
kube-system kube-scheduler-master1 1/1 Running 0 48s
kube-system kube-scheduler-master2 1/1 Running 0 19s
kube-system kube-scheduler-master3 1/1 Running 0 20s
kube-system nodelocaldns-2chnt 1/1 Running 0 22s
kube-system nodelocaldns-2wszl 1/1 Running 0 22s
kube-system nodelocaldns-2xqlc 1/1 Running 0 21s
kube-system nodelocaldns-92ksq 1/1 Running 0 53s
kube-system nodelocaldns-cktmd 1/1 Running 0 22s
kube-system nodelocaldns-skmlq 1/1 Running 0 21s
```
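To confirm that the control plane is reached through the load balancer, you can also check the nodes and the cluster endpoint; a quick sketch (the kubeconfig generated during installation should point at the VIP configured in `controlPlaneEndpoint`):
```bash
kubectl get nodes -o wide
kubectl cluster-info   # the control plane URL should be https://lb.kubesphere.local:6443
```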
## Summary
Creating a highly available Kubernetes cluster is not just about keeping business applications running without downtime. It is also about selecting the right tools and using them to set up the cluster in the most graceful and efficient way. Why not try Keepalived, HAproxy, and KubeKey? Perhaps they will give you the answer you have been looking for.
## Reference
[KubeKey: A Lightweight Installer for Kubernetes and Cloud Native Addons](https://kubesphere.io/blogs/install-kubernetes-using-kubekey/)
[KubeKey GitHub Repository](https://github.com/kubesphere/kubekey)


@ -81,12 +81,32 @@ section3:
icon: 'images/case/section6-99kvm.jpg'
- name: 'mile-works'
icon: 'images/case/section6-mile-works.jpg'
- name: 'uisee'
icon: 'images/case/section6-uisee.png'
- name: 'hongya'
icon: 'images/case/section6-hongya.png'
- name: 'ruijie'
icon: 'images/case/section6-ruijie.png'
- name: 'supaur'
icon: 'images/case/section6-supaur.png'
- name: 'xiaodiketang'
icon: 'images/case/section6-xiaodiketang.png'
- name: 'Internet'
children:
- name: 'sina'
icon: 'images/case/section6-sina.jpg'
- name: 'Logistics'
children:
- name: 'ZTO'
icon: 'images/case/section6-zto.png'
- name: 'MCN'
children:
- name: 'yaowang'
icon: 'images/case/section6-yaowang.png'
bottomContent:
content: Want your logo up there? Just


@ -5,8 +5,8 @@ layout: "scenario"
css: "scss/scenario.scss"
section1:
title: KubeSphere DevOps offers end-to-end workflow and integrates popular CI/CD tools to boost delivery.
content: KubeSphere DevOps provides CI/CD pipeline based on Jenkins, and offers automated workflows including binary-to-image (B2I) and source-to-image (S2I), helps organizations accelerate time to market for their product.
title: KubeSphere DevOps offers end-to-end workflows and integrates popular CI/CD tools to boost delivery.
content: KubeSphere DevOps provides CI/CD pipelines based on Jenkins with automated workflows including Binary-to-Image (B2I) and Source-to-Image (S2I). It helps organizations accelerate the time to market for products.
image: /images/devops/banner.jpg
image: /images/devops/dev-ops.png
@ -14,11 +14,11 @@ image: /images/devops/dev-ops.png
section2:
title: Automatically Checkout Code, Test, Analyse, Build, Deploy and Release
list:
- title: Out-of-box CI/CD Pipeline
- title: Out-of-box CI/CD Pipelines
image: /images/devops/CD-pipeline.png
contentList:
- content: <span>Easy to integrate with your SCM,</span> supporting GitLab / GitHub / BitBucket / SVN
- content: <span>Design a graphical editing panel</span> to create CI/CD pipelines, without writing Jenkinsfile
- content: <span>Design graphical editing panels</span> to create CI/CD pipelines without writing any Jenkinsfile
- content: <span>Integrate SonarQube</span> to implement source code quality analysis
- content: <span>Support dependency cache</span> to accelerate build and deployment
- content: <span>Provide dynamic build agents</span> to automatically spin up Pods as necessary
@ -26,18 +26,18 @@ section2:
- title: Built-in Automated Toolkits
image: /images/devops/Built-in-automated-toolkits.png
contentList:
- content: <span>Source to Image</span> builds reproducible container images from source code without writing dockerfile
- content: <span>Binary-to-image</span> is the bridge between your artifact and a runnable image
- content: <span>Support automatically building and pushing</span> images to any registry, and finally deploy them to Kubernetes
- content: <span>Source-to-Image</span> builds reproducible container images from source code without writing any Dockerfile
- content: <span>Binary-to-Image</span> is the bridge between your artifact and a runnable image
- content: <span>Support automatically building and pushing</span> images to any registry, and finally deploying them to Kubernetes
- content: <span>Provide excellent recoverability and flexibility</span> as you can rebuild and rerun S2I / B2I whenever a patch is needed
- title: Use GitOps to implement DevOps, not just culture
- title: Use GitOps to Implement DevOps
image: /images/devops/Clear-insight.png
contentList:
- content: <span>Combine Git with Kubernetes convergence, and automates the cloud native Apps delivery</span>
- content: <span>Designed for teams, offer built-in multitenancy in DevOps project</span>
- content: <span>Liable to be observable,</span> provide dynamic logs for the S2I / B2I build and pipeline
- content: Provide audit, alert and notification in pipeline, ensuring issues can be quickly located and solved
- content: <span>Combine Git with Kubernetes, automating cloud-native app delivery</span>
- content: <span>Designed for DevOps teamwork on the basis of the multi-tenant system of KubeSphere</span>
- content: <span>Powerful observability,</span> providing dynamic logs for S2I / B2I builds and pipelines
- content: Provide auditing, alerting and notifications in pipelines, ensuring issues can be quickly located and solved
- content: Support adding Git SCM webhooks to trigger a Jenkins build when new commits are submitted to the branch
section3:


@ -17,7 +17,7 @@ Using [Redis](https://redis.io/) as an example application, this tutorial demons
## Prerequisites
- You need to enable [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/).
- You need to enable the [KubeSphere App Store (OpenPitrix)](../../pluggable-components/app-store/).
- You need to create a workspace, a project and an account (`project-regular`). For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project/).
## Hands-on Lab
@ -135,7 +135,7 @@ The version number must start with a number and contain decimal points.
### Step 3: Review the application
1. Log out and log back in KubeSphere as `reviewer`. Click **Platform** in the top left corner and select **App Store Management**. On the **App Review** page, the app submitted in the previous step displays under the tab **Unprocessed**.
1. Log out of KubeSphere and log back in as `reviewer`. Click **Platform** in the top left corner and select **App Store Management**. On the **App Review** page, the app submitted in the previous step displays under the tab **Unprocessed**.
![app-to-be-reviewed](/images/docs/appstore/application-lifecycle-management/app-to-be-reviewed.jpg)
@ -149,7 +149,7 @@ The version number must start with a number and contain decimal points.
After the app is approved, `isv` can release the Redis application to the App Store, allowing all users on the platform to find and deploy this application.
1. Log out and log back in KubeSphere as `isv`. Go to your workspace and click Redis on the **App Templates** page. On its detail page, expand the version menu, then click **Release to Store**. In the pop-up prompt, click **OK** to confirm.
1. Log out of KubeSphere and log back in as `isv`. Go to your workspace and click Redis on the **App Templates** page. On its detail page, expand the version menu, then click **Release to Store**. In the pop-up prompt, click **OK** to confirm.
![app-templates-page](/images/docs/appstore/application-lifecycle-management/app-templates-page.jpg)


@ -0,0 +1,205 @@
---
title: "Deploy TiDB Operator and a TiDB Cluster on KubeSphere"
keywords: 'KubeSphere, Kubernetes, TiDB, TiDB Operator, TiDB Cluster'
description: 'How to deploy TiDB Operator and a TiDB Cluster on KubeSphere'
linkTitle: "Deploy TiDB Operator and a TiDB Cluster"
weight: 14320
---
[TiDB](https://en.pingcap.com/) is a cloud-native, open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads. It features horizontal scalability, strong consistency, and high availability.
This tutorial demonstrates how to deploy TiDB Operator and a TiDB Cluster on KubeSphere.
## Prerequisites
- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/).
- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/).
## Hands-on Lab
### Step 1: Install TiDB Operator CRD
1. Log in to the KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the bottom right corner to execute the following command to install TiDB Operator CRD:
```bash
kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/manifests/crd.yaml
```
2. You can see the expected output as below:
```bash
customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created
```
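You can also double-check that the CRDs are registered; a quick sketch:
```bash
kubectl get crd | grep pingcap.com
```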
### Step 2: Add an app repository
1. Log out of KubeSphere and log back in as `ws-admin`. In your workspace, go to **App Repos** under **Apps Management**, and then click **Add Repo**.
![add-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG)
2. In the dialog that appears, enter `pingcap` for the app repository name and `https://charts.pingcap.org` for the PingCAP Helm repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue.
![add-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG)
3. Your repository displays in the list after it is successfully imported to KubeSphere.
![added-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG)
### Step 3: Deploy TiDB Operator
1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Applications** under **Application Workloads** and click **Deploy New Application**.
![deploy-app](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG)
2. In the dialog that appears, select **From App Templates**.
![from-app-templates](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG)
3. Select `pingcap` from the drop-down list, then click **tidb-operator**.
![click-tidb-operator](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG)
{{< notice note >}}
This tutorial only demonstrates how to deploy TiDB Operator and a TiDB cluster. You can also deploy other tools based on your needs.
{{</ notice >}}
4. On the **Chart Files** tab, you can view the configuration from the console directly or download the default `values.yaml` file by clicking the icon in the upper right corner. Under **Versions**, select a version number from the drop-down list and click **Deploy**.
![select-version](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG)
5. On the **Basic Info** page, confirm the app name, app version, and deployment location. Click **Next** to continue.
![basic-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG)
6. On the **App Config** page, you can either edit the `values.yaml` file, or click **Deploy** directly with the default configurations.
![check-config-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG)
7. Wait for TiDB Operator to be up and running.
![tidb-operator-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG)
8. Go to **Workloads**, and you can see two Deployments created for TiDB Operator.
![tidb-deployment](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG)
### Step 4: Deploy a TiDB cluster
The process of deploying a TiDB cluster is similar to deploying TiDB Operator.
1. Go to **Applications** under **Application Workloads**, click **Deploy New Application** again, and then select **From App Templates**.
![deploy-app-again](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG)
![from-app-templates-2](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG)
2. From the PingCAP repository, click **tidb-cluster**.
![click-tidb-cluster](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG)
3. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Deploy** to continue.
![download-yaml-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG)
4. On the **Basic Info** page, confirm the app name, app version, and deployment location. Click **Next** to continue.
![tidb-cluster-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG)
5. Some TiDB components require [persistent volumes](../../../cluster-administration/persistent-volume-and-storage-class/). You can run the following command to view your storage classes.
```
/ # kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
csi-high-capacity-legacy csi-qingcloud Delete Immediate true 71m
csi-high-perf csi-qingcloud Delete Immediate true 71m
csi-ssd-enterprise csi-qingcloud Delete Immediate true 71m
csi-standard (default) csi-qingcloud Delete Immediate true 71m
csi-super-high-perf csi-qingcloud Delete Immediate true 71m
```
6. On the **App Config** page, change the default value of the field `storageClassName` from `local-storage` to the name of your storage class. For example, you can change it to `csi-qingcloud` based on the above output.
![tidb-cluster-config](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG)
{{< notice note >}}
Only the field `storageClassName` is changed to provide external persistent storage. If you want to deploy each TiDB component, such as [TiKV](https://docs.pingcap.com/tidb/dev/tidb-architecture#tikv-server) and [Placement Driver](https://docs.pingcap.com/tidb/dev/tidb-architecture#placement-driver-pd-server), to individual nodes, specify the field `nodeAffinity`.
{{</ notice >}}
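If you prefer editing the downloaded `values.yaml` locally instead of in the console, the same change can be scripted; a minimal sketch, assuming the `csi-qingcloud` value used as the example above:
```bash
# Point every default storage class reference in the chart values at csi-qingcloud.
sed -i 's/storageClassName: local-storage/storageClassName: csi-qingcloud/g' values.yaml
```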
7. Click **Deploy** and you can see two apps in the list as shown below:
![tidb-cluster-app-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG)
### Step 5: View TiDB cluster status
1. Go to **Workloads** under **Application Workloads**, and verify that all TiDB cluster Deployments are up and running.
![tidb-cluster-deployments-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG)
2. Switch to the **StatefulSets** tab, and you can see TiDB, TiKV and PD are up and running.
![tidb-statefulsets](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG)
{{< notice note >}}
TiKV and TiDB will be created automatically and it may take a while before they display in the list.
{{</ notice >}}
3. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab.
TiDB metrics:
![tidb-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG)
TiKV metrics:
![tikv-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG)
PD metrics:
![pd-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG)
4. In **Pods** under **Application Workloads**, you can see the TiDB cluster contains two TiDB Pods, three TiKV Pods, and three PD Pods.
![tidb-pod-list](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG)
5. In **Volumes** under **Storage**, you can see TiKV and PD are using persistent volumes.
![tidb-storage-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG)
6. Volume usage is also monitored. Click a volume item to go to its detail page. Here is an example of TiKV:
![tikv-volume-status](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG)
7. On the **Overview** page of the project, you can see a list of resource usage in the current project.
![tidb-project-resource-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG)
### Step 6: Access the TiDB cluster
1. Go to **Services** under **Application Workloads**, and you can see detailed information of all Services. As the Service type is set to `NodePort` by default, you can access it through the Node IP address outside the cluster.
![tidb-service](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG)
2. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `{$NodeIP}:{NodePort}` to view metrics.
![tidb-service-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG)
![tidb-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.PNG)
{{< notice note >}}
You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed.
{{</ notice >}}
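TiDB is MySQL-compatible, so once you know a node IP address and the NodePort assigned to the `tidb` Service, you can connect with any MySQL client; a sketch with placeholder values:
```bash
# Replace <node-ip> and <node-port> with a node IP address and the NodePort of the tidb Service
# (the Service listens on port 4000 inside the cluster).
mysql -h <node-ip> -P <node-port> -u root
```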


@ -25,7 +25,7 @@ Monitor node status and learn how to add node labels or taints.
## [Cluster Status Monitoring](../cluster-administration/cluster-status-monitoring/)
Monitor how a cluster is functioning based on different metrics, including physical resources, ETCD, and APIServer.
Monitor how a cluster is functioning based on different metrics, including physical resources, etcd, and APIServer.
## [Application Resources Monitoring](../cluster-administration/application-resources-monitoring/)


@ -28,7 +28,7 @@ You have created a node-level alerting policy and received alert notifications o
![alerting_message_node_level_list](/images/docs/alerting/alerting_message_node_level_list.png)
5. Click the alerting message to enter the detail page. In **Alerting Detail**, you can see the graph of memory utilization rate of the node over time, which has been continuously higher than the threshold of `50%` set in the alert rule, so the alert was triggered.
5. Click the alerting message to go to its detail page. In **Alerting Detail**, you can see the graph of memory utilization rate of the node over time, which has been continuously higher than the threshold of `50%` set in the alert rule, so the alert was triggered.
![alerting_message_node_level_detail](/images/docs/alerting/alerting_message_node_level_detail.png)


@ -36,7 +36,7 @@ KubeSphere provides alerting policies for nodes and workloads. This guide demons
In the dialog that appears, fill in the basic information as follows. Click **Next** after you finish.
- **Name**: a concise and clear name as its unique identifier, such as `alert-demo`.
- **Alias**: to help you distinguish alerting policies better. Chinese is supported.
- **Alias**: to help you distinguish alerting policies better.
- **Description**: a brief introduction to the alerting policy.
![alerting_policy_node_level_basic_info](/images/docs/alerting/alerting_policy_node_level_basic_info.png)


@ -50,7 +50,7 @@ You have shut down your cluster gracefully.
{{< notice tip >}}
Usually, a cluster can be used after restarting, but the cluster may be unavailable due to unexpected conditions. For example:
- Etcd data corruption during the shutdown.
- etcd data corruption during the shutdown.
- Node failures.
- Unexpected network errors.


@ -11,7 +11,7 @@ icon: "/images/docs/docs.svg"
To deploy and manage your CI/CD tasks and related workloads on your Kubernetes clusters, you use the KubeSphere DevOps system. This chapter demonstrates how to manage and work in DevOps projects, including running pipelines, creating credentials, and integrating tools.
As you install the DevOps component, Jenkins is automatically deployed. KubeSphere provides you with a consistent user experience as you can build a pipeline through the Jenkinsfile just as you did before. Besides, KubeSphere features a highly responsive graphical dashboard that visualizes the whole process, presenting you with a straightforward view of how your pipeline is running at what stage.
As you install the DevOps component, Jenkins is automatically deployed. KubeSphere provides you with a consistent user experience as you can build a pipeline through the Jenkinsfile just as you did before. Besides, KubeSphere features graphical editing panels that visualize the whole process, presenting you with a straightforward view of how your pipeline is running at what stage.
## Understand and Manage DevOps Projects
@ -69,7 +69,7 @@ Integrate SonarQube into your pipeline for code quality analysis.
### [Integrate Harbor in Pipelines](../devops-user-guide/how-to-integrate/harbor/)
Integrate Harbor to push an image to your Harbor registry using a KubeSphere pipeline.
Integrate Harbor into your pipeline to push images to your Harbor registry.
## Examples
@ -77,10 +77,10 @@ Integrate Harbor to push an image to your Harbor registry using a KubeSphere pip
Learn how to build and deploy a Go project using a KubeSphere pipeline.
### [Deploy Apps in a Multi-cluster Project Using a Jenkinsfile](../devops-user-guide/examples/multi-cluster-project-example/)
Learn how to deploy apps in a multi-cluster project using a Jenkinsfile-based pipeline.
### [Build and Deploy a Maven Project](../devops-user-guide/examples/a-maven-project/)
Learn how to build and deploy a Maven project using a KubeSphere pipeline.
### [Deploy Apps in a Multi-cluster Project Using a Jenkinsfile](../devops-user-guide/examples/multi-cluster-project-example/)
Learn how to deploy a multi-cluster project using a KubeSphere Jenkinsfile-based pipeline.


@ -1,30 +1,30 @@
---
title: "Build and Deploy a Maven Project"
keywords: 'kubernetes, docker, devops, jenkins, maven'
description: ''
description: 'How to build and deploy a Maven project'
linkTitle: "Build and Deploy a Maven Project"
weight: 11430
---
## Prerequisites
- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
- You need to create [DockerHub](http://www.dockerhub.com/) account.
- You need to create a workspace, a DevOps project, and a user account, and this account needs to be invited into the DevOps project as the role of `maintainer`.
- You need to [enable the KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
- You need to have a [Docker Hub](http://www.dockerhub.com/) account.
- You need to create a workspace, a DevOps project, and a user account, and this account needs to be invited into the DevOps project with the role of `operator`. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/).
## Workflow for Maven Project
## Workflow for a Maven Project
As is shown in the graph, there is the workflow for a Maven project in KubeSphere DevOps, which uses the pipeline of Jenkins to build and deploy the Maven project. All steps are defined in the pipeline.
As is shown in the graph below, there is the workflow for a Maven project in KubeSphere DevOps, which uses a Jenkins pipeline to build and deploy the Maven project. All steps are defined in the pipeline.
When running, Jenkins Master creates a Pod to run the pipeline. Kubernetes creates the Pod as the agent of Jenkins Master, and the Pod will be destoryed after pipeline finished. The main process is to clone code, build & push image, and deploy the workload.
![maven-project-jenkins](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/maven-project-jenkins.png)
![workflow](/images/devops/maven-project-jenkins.png)
At first, the Jenkins Master creates a Pod to run the pipeline. Kubernetes creates the Pod as the agent of Jenkins Master, and the Pod will be destroyed after the pipeline finished. The main process includes cloning code, building and pushing an image, and deploying the workload.
## Default Configurations in Jenkins
### Maven Version
### Maven version
Execute the following command in the Maven builder container to get version info.
Execute the following command in the Maven builder container to get version information.
```bash
mvn --version
@ -36,13 +36,13 @@ Java home: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.i386/jre
Default locale: en_US, platform encoding: UTF-8
```
### Maven Cache
### Maven cache
Jenkins Agent mounts the directories by Docker Volume on the node. So the pipeline can cache some spicial directory such as `/root/.m2`, which is used for the Maven building and the default cache directory for Maven tools in KubeSphere DevOps so that the dependency packages are downloaded and cached on the node.
The Jenkins Agent mounts the directories by Docker Volume on the node. The pipeline can cache some special directories such as `/root/.m2`, which are used for Maven building and the default cache directory for Maven tools in KubeSphere DevOps, so that dependency packages are downloaded and cached on the node.
### Global Maven Settings in Jenkins Agent
### Global Maven settings in the Jenkins Agent
The default Maven settings file path is `maven` and the configuration file path is `/opt/apache-maven-3.5.3/conf/settings.xml`. Execute the following command to get the content of Maven settings.
The default file path of Maven settings is `maven` and the configuration file path is `/opt/apache-maven-3.5.3/conf/settings.xml`. Execute the following command to get the content of Maven settings.
```bash
kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml
@ -50,122 +50,120 @@ kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml
### Network of Maven Pod
The Pod labeled `maven` uses the docker-in-docker network to run the pipeline. That is, the `/var/run/docker.sock` in the node is mounted into the Maven container.
The Pod labeled `maven` uses the docker-in-docker network to run the pipeline. Namely, `/var/run/docker.sock` in the node is mounted to the Maven container.
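Because the node's Docker socket is mounted into the agent Pod, `docker` commands executed in a `container('maven')` step talk to that node's Docker daemon; a quick way to confirm this from within a pipeline step (a sketch, not part of the original example):
```bash
# Run inside the maven container of a pipeline step; the reported name and version
# belong to the node's Docker daemon, not to a daemon inside the Pod.
docker info --format '{{.Name}} {{.ServerVersion}}'
```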
## A Maven Pipeline Example
### Prepare for the Maven Project
### Prepare for the Maven project
- ensure build the Maven project successfully on the development device.
- add the Dockerfile file into the project repo for building the image, refer to <https://github.com/kubesphere/devops-java-sample/blob/master/Dockerfile-online>.
- add the yaml file into the project repo for deploy the workload, refer to <https://github.com/kubesphere/devops-java-sample/tree/master/deploy/dev-ol>. If there are different environments, you need to prepare multiple deployment files.
- Ensure you build the Maven project successfully on the development device.
- Add the Dockerfile to the project repository to build the image. For more information, refer to <https://github.com/kubesphere/devops-java-sample/blob/master/Dockerfile-online>.
- Add the YAML file to the project repository to deploy the workload. For more information, refer to <https://github.com/kubesphere/devops-java-sample/tree/master/deploy/dev-ol>. If there are different environments, you need to prepare multiple deployment files.
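For reference, a quick local check before pushing the code might look like the sketch below; the image name and tag are only placeholders:

```bash
# Build the project and the image locally (hypothetical names; adjust to your repository).
mvn -B clean package
docker build -f Dockerfile-online -t my-namespace/devops-java-sample:local-test .
```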
### Create the Credentials
### Create credentials
- dockerhub-id. An *Account Credentials* entry for the registry, e.g. Docker Hub.
- demo-kubeconfig. A *Kubeconfig Credential* for deploying workloads.
| Credential ID | Type | Use |
| --------------- | ------------------- | ---------------------------- |
| dockerhub-id | Account Credentials | Registry, such as Docker Hub |
| demo-kubeconfig | kubeconfig | Deploy workloads |
For details, please refer to the [Credentials Management](../../how-to-use/credential-management/).
For details, refer to the [Credential Management](../../how-to-use/credential-management/).
![view credential list](/images/devops/view-credential-list.png)
![view-credential-list](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-credential-list.png)
### Create the Project for Workloads
### Create a project for workloads
In this demo, all of the workloads are deployed under `kubesphere-sample-dev`, so you need to create the project `kubesphere-sample-dev` in advance.
In this example, all workloads are deployed in `kubesphere-sample-dev`. You must create the project `kubesphere-sample-dev` in advance.
![view namespace](/images/devops/view-namespace.png)
![view-namespace](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-namespace.png)
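You normally create the project from the KubeSphere console. If you only need the namespace for this example, a CLI alternative is sketched below; note that a namespace created this way is a plain Kubernetes namespace and is not bound to any workspace, so the console is usually preferable.

```bash
# Create and verify the target namespace (projects in KubeSphere are namespaces).
kubectl create namespace kubesphere-sample-dev
kubectl get namespace kubesphere-sample-dev
```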
### Create the Pipeline for the Maven Project
### Create a pipeline for the Maven project
At first, create a *DevOps Project* and a *Pipeline* refer to [Create a Pipeline - using Graphical Editing Panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel).
1. Go to **Pipelines** of your DevOps project and click **Create**. For more information, see [Create a Pipeline - using Graphical Editing Panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel).
Secondly, click *Edit Jenkinsfile* button under your pipeline.
2. Go to the detail page of the pipeline and click **Edit Jenkinsfile**.
![edit jenkinsfile](/images/devops/edit-jenkinsfile.png)
![edit-jenkinsfile](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/edit-jenkinsfile.png)
Paste the following text into the pop-up window and save it.
3. Copy and paste the following content into the pop-up window. You must replace the value of `DOCKERHUB_NAMESPACE` with yours. When you finish, save it.
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
parameters {
string(name:'TAG_NAME',defaultValue: '',description:'')
}
environment {
DOCKER_CREDENTIAL_ID = 'dockerhub-id'
KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
REGISTRY = 'docker.io'
// need to replace by yourself dockerhub namespace
DOCKERHUB_NAMESPACE = 'shaowenchen'
APP_NAME = 'devops-java-sample'
BRANCH_NAME = 'dev'
}
stages {
stage ('checkout scm') {
steps {
git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git"
}
}
stage ('unit test') {
steps {
container ('maven') {
sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test'
}
}
}
stage ('build & push') {
steps {
container ('maven') {
sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package'
sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .'
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) {
sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER'
}
}
}
}
stage('deploy to dev') {
steps {
kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
}
}
```
parameters {
string(name:'TAG_NAME',defaultValue: '',description:'')
}
4. Save the Jenkinsfile and you can see stages and steps are automatically created on graphical editing panels.
environment {
DOCKER_CREDENTIAL_ID = 'dockerhub-id'
KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
REGISTRY = 'docker.io'
// need to replace by yourself dockerhub namespace
DOCKERHUB_NAMESPACE = 'shaowenchen'
APP_NAME = 'devops-java-sample'
BRANCH_NAME = 'dev'
}
stages {
stage ('checkout scm') {
steps {
git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git"
}
}
stage ('unit test') {
steps {
container ('maven') {
sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test'
}
}
}
stage ('build & push') {
steps {
container ('maven') {
sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package'
sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .'
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) {
sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER'
}
}
}
}
stage('deploy to dev') {
steps {
kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
}
}
```
After saving, you will get this.
![view jenkinsfile](/images/devops/view-edit-jenkinsfile.png)
![view-edit-jenkinsfile](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-edit-jenkinsfile.png)
### Run and test
Click `run` and type `TAG_NAME` to run the pipeline.
1. Click **Run** and type `TAG_NAME` to run the pipeline.
![run maven pipeling](/images/devops/run-maven-pipeline.png)
![run-maven-pipeline](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/run-maven-pipeline.png)
After the run is complete, you can see the following figure.
2. You can see the following figure when the pipeline finished.
![view result](/images/devops/view-result-maven-pipeline.png)
![view-result-maven-pipeline](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-pipeline.png)
Under the project of `kubesphere-sample-dev`, there are new workloads created.
3. In the project of `kubesphere-sample-dev`, there are new workloads created.
![maven workload](/images/devops/view-result-maven-workload.png)
![view-result-maven-workload](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload.png)
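If you prefer the command line, a quick check of the newly created resources could look like this, assuming kubectl access to the cluster:

```bash
# List the Deployments and Services created by the 'deploy to dev' stage.
kubectl -n kubesphere-sample-dev get deployments,services
```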
You can view the access address of the application through its Service.
4. You can view the access address of the Service as below.
![maven service](/images/devops/view-result-maven-workload-svc.png)
## Summary
This document is not a getting-started guide. It introduces some configurations for building Maven projects on the KubeSphere DevOps platform, and an example pipeline for a Maven project is provided. You are free to add new steps to improve the pipeline for your own case.
![view-result-maven-workload-svc](/images/docs/devops-user-guide/examples/build-and-deploy-a-maven-project/view-result-maven-workload-svc.png)
View File
@ -1,128 +1,132 @@
---
title: "How to integrate Harbor in Pipeline"
keywords: 'kubernetes, docker, devops, jenkins, harbor'
description: ''
linkTitle: "Integrate Harbor in Pipeline"
title: "Integrate Harbor into Pipelines"
keywords: 'Kubernetes, Docker, DevOps, Jenkins, Harbor'
description: 'How to integrate Harbor into pipelines'
linkTitle: "Integrate Harbor into Pipelines"
weight: 11320
---
This tutorial demonstrates how to integrate Harbor into KubeSphere pipelines.
## Prerequisites
- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
- You need to create a workspace, a DevOps project, and a **project-regular** user account, and this account needs to be invited into a DevOps project. See [create-workspace-and-project](../../../../docs/quick-start/create-workspace-and-project).
- You need to have installed **Harbor** already.
- You need to [enable the KubeSphere DevOps System](../../../pluggable-components/devops/).
- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/) if they are not ready.
## Install Harbor
## Install Harbor
It is highly recommended that you install Harbor through the App Store. You can also install Harbor manually with Helm 3.
It is highly recommended that you install Harbor through [the App Store of KubeSphere](../../../application-store/built-in-apps/harbor-app/). Alternatively, install Harbor manually through Helm3.
```bash
helm repo add harbor https://helm.goharbor.io
# For a quick taste, you can expose Harbor by NodePort and disable TLS.
# Set externalURL to one of your node IPs and make sure it can be accessed by Jenkins.
# For a quick start, you can expose Harbor by NodePort and disable TLS.
# Set externalURL to one of your node IPs and make sure it can be accessed by Jenkins.
helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL=http://$ip:30002,expose.tls.enabled=false
```
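While the chart is being deployed, you can watch the components start. This assumes the release was installed into your current namespace and that the chart labels its Pods with `app=harbor`:

```bash
# Wait until all Harbor Pods are Running before opening the portal.
kubectl get pods -l app=harbor -w
```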
After several minutes, open your browser and visit `http://$node_ip:30002`. Enter **admin** and **Harbor12345**, then click **LOG IN**.
## Get Harbor Credentials
![](/images/devops/harbor-login.png)
1. After Harbor is installed, visit `NodeIP:30002` and log in to the console with the default account and password (`admin/Harbor12345`). Go to **Projects** and click **NEW PROJECT**.
Click **NEW PROJECT**, enter the project name, then click **OK**.
![harbor-projects](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/harbor-projects.jpg)
## Get Harbor credential
2. Set a name and click **OK**.
![](/images/devops/harbor-new-project.png)
![set-name](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/set-name.png)
![](/images/devops/harbor-project-ok.png)
3. Click the project you just created, and select **NEW ROBOT ACCOUNT** in **Robot Accounts**.
Click your project name you just created, find the **Robot Accounts** tab, then click **NEW ROBOT ACCOUNT**.
![robot-account](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account.png)
![](/images/devops/harbor-robot-account.png)
4. Enter the name of the robot account and save it.
Enter the name of the robot account, then save it.
![robot-account-name](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/robot-account-name.png)
![](/images/devops/harbor-robot-account-ok.png)
5. Click **EXPORT TO FILE** to save the token.
Click **EXPORT TO FILE** to save the credential.
![export-to-file](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/export-to-file.png)
![](/images/devops/harbor-robot-account-save.png)
## Create Credentials
### Create Credentials
1. Log in to KubeSphere as `project-regular`, go to your DevOps project and create credentials for Harbor in **Credentials** under **Project Management**.
Log in to KubeSphere, enter into the created DevOps project and create the following credential under **Project Management → Credentials**:
![create-credentials](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/create-credentials.png)
![](/images/devops/ks-console-create-credential.png)
2. On the **Create Credentials** page, set a credential ID and select **Account Credentials** for **Type**. The **Username** field must be the same as the value of `name` in the JSON file you just downloaded, and input the value of `token` in the file for **Token/Password**.
The **Username** is the `name` field of the JSON file you just saved, and **Password** takes the `token` field.
![credentials-page](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/credentials-page.png)
![](/images/devops/ks-console-credential-ok.png)
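If you want to read the two values from the command line instead of opening the file, here is a small sketch with `jq`; the file name below is only an example, so use the file you actually exported:

```bash
# Print the robot account name and token from the exported JSON file (example file name).
jq -r '.name, .token' 'robot$demo-account.json'
```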
3. Click **OK** to save it.
## Create a pipeline
## Create a Pipeline
![](/images/devops/ks-console-create-pipline.png)
1. Go to the **Pipelines** page and click **Create**. Provide the basic information in the dialog that appears and click **Next**.
Fill in the pipeline's basic information in the pop-up window: enter the name of the pipeline and keep the default values for the other fields.
![basic-info](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/basic-info.png)
![](/images/devops/create-pipline-2.png)
2. Use default values in **Advanced Settings** and click **Create**.
![](/images/devops/create-pipline-3.png)
![advanced-settings](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/advanced-settings.png)
## Edit jenkins file
## Edit the Jenkinsfile
Click the **Edit Jenkinsfile** button under your pipeline and paste the following text into the pop-up window. You need to replace **REGISTRY**, **HARBOR_NAMESPACE**, **APP_NAME**, and **HARBOR_CREDENTIAL** with your own values.
1. Click the pipeline to go to its detail page and click **Edit Jenkinsfile**.
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
environment {
// the address of your harbor registry
REGISTRY = '103.61.38.55:30002'
// the project name
// make sure your robot account has enough access to the project
HARBOR_NAMESPACE = 'ks-devops-harbor'
// docker image name
APP_NAME = 'docker-example'
// yuswift is the credential id you created on ks console
HARBOR_CREDENTIAL = credentials('yuswift')
}
stages {
stage('docker login') {
steps{
container ('maven') {
// replace the username behind -u and do not forget ''
sh '''echo $HARBOR_CREDENTIAL_PSW | docker login $REGISTRY -u 'robot$yuswift2018' --password-stdin'''
}
}
}
stage('build & push') {
steps {
container ('maven') {
sh 'git clone https://github.com/kstaken/dockerfile-examples.git'
sh 'cd dockerfile-examples/rethinkdb && docker build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test .'
sh 'docker push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test'
}
}
}
}
}
```
![edit-jenkinsfile](/images/docs/devops-user-guide/tool-integration/integrate-harbor-into-pipeline/edit-jenkinsfile.png)
2. Copy and paste the following content into the Jenkinsfile. Note that you must replace the value of `REGISTRY`, `HARBOR_NAMESPACE`, `APP_NAME`, and `HARBOR_CREDENTIAL`.
> Note:
>
> - You can pass the parameter to `docker login -u` via a Jenkins credential with an environment variable. However, every Harbor robot account username contains a "\$" character, which will be converted into "\$$" by Jenkins when used in environment variables. See more about [this](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/).
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
environment {
// the address of your harbor registry
REGISTRY = '103.61.38.55:30002'
// the project name
// make sure your robot account has enough access to the project
HARBOR_NAMESPACE = 'ks-devops-harbor'
// docker image name
APP_NAME = 'docker-example'
// yuswift is the credential id you created on ks console
HARBOR_CREDENTIAL = credentials('yuswift')
}
stages {
stage('docker login') {
steps{
container ('maven') {
// replace the username behind -u with your Harbor robot account name and do not forget the single quotes around it
sh '''echo $HARBOR_CREDENTIAL_PSW | docker login $REGISTRY -u 'robot$yuswift2018' --password-stdin'''
}
}
}
stage('build & push') {
steps {
container ('maven') {
sh 'git clone https://github.com/kstaken/dockerfile-examples.git'
sh 'cd dockerfile-examples/rethinkdb && docker build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test .'
sh 'docker push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test'
}
}
}
}
}
```
![](/images/devops/edit-jenkins-file.png)
{{< notice note >}}
## Run the pipeline
You can pass the parameter to `docker login -u ` via Jenkins credentials with environment variables. However, every Harbor robot account's username contains a "\$" character, which will be converted into "\$$" by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/).
After you have saved the Jenkinsfile, click the **Run** button. If everything goes well, you will see that the image has been pushed to your Harbor registry by Jenkins.
{{</ notice >}}
![](/images/devops/run-pipline.png)
## Run the Pipeline
Save the Jenkinsfile and KubeSphere automatically creates all stages and steps on the graphical editing panels. Click **Run** to execute the pipeline. If everything goes well, the image will be pushed to your Harbor registry by Jenkins.
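To double-check from any machine that can reach the registry, you could try pulling the image the pipeline just pushed. The values below come from the example Jenkinsfile; replace them with your own, and note that an HTTP registry must be added to Docker's insecure registries first.

```bash
# Pull the image built by the example pipeline to confirm it exists in Harbor.
docker pull 103.61.38.55:30002/ks-devops-harbor/docker-example:devops-test
```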
View File
@ -44,6 +44,12 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main -n kubesphere-devops-system --create-namespace --set service.type=NodePort
```
{{< notice note >}}
Make sure you use Helm 3 to install SonarQube Server.
{{</ notice >}}
3. You will get this prompt:
![sonarqube-install](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-install.png)
@ -210,7 +216,7 @@ To integrate SonarQube into your pipeline, you must install SonarQube Server fir
![sonarqube-jenkins-settings](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-jenkins-settings.jpg)
### Step 6: Add sonarqubeUrl to the KubeSphere Console
### Step 6: Add sonarqubeURL to the KubeSphere Console
You need to specify `sonarqubeURL` so that you can access SonarQube directly from the KubeSphere console.
View File
@ -333,11 +333,11 @@ This stage uses SonarQube to test your code. You can skip this stage if you do n
![pipeline-successful](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-graphical-editing-panels/pipeline-successful.jpg)
4. Log out of KubeSphere and log back in the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Activity** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**.
4. Log out of KubeSphere and log back in to the console as `project-admin`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Activity** tab, click the record to be reviewed. To approve the pipeline, click **Proceed**.
### Step 6: View pipeline details
1. Log back in the console as `project-regular`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Activity** tab, click the record marked with **Success** under **Status**.
1. Log in to the console as `project-regular`. Go to your DevOps project and click the pipeline `graphical-pipeline`. Under the **Activity** tab, click the record marked with **Success** under **Status**.
2. If everything runs successfully, you can see that all stages are completed.
View File
@ -61,7 +61,7 @@ There are eight stages as shown below in this example pipeline.
| github-id | Account Credentials | GitHub |
| demo-kubeconfig | kubeconfig | Kubernetes |
2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project) to use the token for the **secret** field below. Click **OK** to finish.
2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-a-sonarqube-token-for-a-new-project) to use the token for the **secret** field below. Click **OK** to finish.
![sonar-token](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg)
@ -121,7 +121,7 @@ The account `project-admin` needs to be created in advance since it is the revie
| kubesphere-sample-dev | development environment |
| kubesphere-sample-prod | production environment |
2. Check the project list. You have two projects and one DevOps project as below:
2. After those projects are created, they will be listed in the project list as below:
![project-list](/images/docs/devops-user-guide/using-devops/create-a-pipeline-using-a-jenkinsfile/project-list.jpg)
@ -207,8 +207,8 @@ The account `project-admin` needs to be created in advance since it is the revie
{{< notice note >}}
- If you do not see any activity on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button).
- The tag name is used to generate releases and images with the tag in GitHub and Docker Hub. An existing tag name cannot be used again for the field TAG_NAME. Otherwise, the pipeline will not be running successfully.
- If you do not see any activity on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button).
- The tag name is used to generate releases and images with the tag in GitHub and Docker Hub. An existing tag name cannot be used again for the field `TAG_NAME`. Otherwise, the pipeline will not be running successfully.
{{</ notice >}}
@ -246,13 +246,13 @@ The account `project-admin` needs to be created in advance since it is the revie
### Step 7: Verify results
1. Once you successfully executed the pipeline, click **Code Quality** to check the results through SonarQube as follows.
1. Once you successfully executed the pipeline, click **Code Quality** to check the results through SonarQube as follows.
![sonarqube-result-detail-1.jpg](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-result-detail-1.jpg.jpg)
![sonarqube-result-detail](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/sonarqube-result-detail.jpg)
2. The Docker image built through the pipeline has also been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs.
2. The Docker image built through the pipeline has also been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs.
![docker-hub-result](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/docker-hub-result.jpg)
@ -260,7 +260,7 @@ The account `project-admin` needs to be created in advance since it is the revie
![github-result](/images/docs/devops-user-guide/tool-integration/integrate-sonarqube-into-pipeline/github-result.jpg)
4. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. Go to these two projects and here are the expected result:
4. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. Go to these two projects and here are the expected result:
| Environment | URL | Namespace | Deployment | Service |
| :--- | :--- | :--- | :--- | :--- |
View File
@ -11,7 +11,7 @@ The built-in Jenkins cannot share the same email configuration with the platform
## Prerequisites
- You need to enable [KubeSphere DevOps System](../../../pluggable-components/devops/).
- You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/).
- You need an account granted a role including the authorization of **Clusters Management**. For example, you can log in to the console as `admin` directly or create a new role with the authorization and assign it to an account.
## Set the Email Server
View File
@ -5,3 +5,5 @@ description: ''
linkTitle: "Pipeline Settings"
weight: 11280
---
TBD.
View File
@ -44,7 +44,7 @@ A pipeline entails a collection of plugins that allow you to constantly and cons
### Credentials
A DevOps project user with required permissions can configure credentials for pipelines for the interaction with external environments. Once the user adds these credentials in a DevOps project, the credentials can be used by the DevOps project to interact with third-party applications, such as GitHub, GitLab and Docker Hub. For more information, see [Credential Management](../credential-management/).
A DevOps project user with required permissions can configure credentials for pipelines for the interaction with external environments. Once the user adds these credentials in a DevOps project, the credentials can be used by the DevOps project to interact with third-party applications, such as GitHub, GitLab and Docker Hub. For more information, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/).
### Members and roles
View File
@ -6,7 +6,7 @@ linkTitle: "Role and Member Management"
weight: 11130
---
This guide demonstrates how to manage roles and members in your DevOps project. For more information about KubeSphere roles, see Overview of Role Management.
This guide demonstrates how to manage roles and members in your DevOps project.
In DevOps project scope, you can grant the following resources' permissions to a role:
View File
@ -0,0 +1,143 @@
---
title: "Account Login Failure"
keywords: "login failure, account is not active, KubeSphere, Kubernetes"
description: "How to solve the issue of login failure"
linkTitle: "Account Login Failure"
Weight: 16440
---
KubeSphere automatically creates a default account (`admin/P@88w0rd`) when it is installed. After `ks-controller-manager` synchronizes statuses of accounts to OpenLDAP and Jenkins, accounts and passwords will be encrypted. An account cannot be used for login until the status reaches `Active`.
Here are some of the frequently asked questions about account login failure.
## Account Not Active
You may see an image below when the login fails. To find out the reason and solve the issue, perform the following steps:
![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/account-not-active.png)
1. Execute the following command to check the status of your account.
```bash
$ kubectl get users
NAME EMAIL STATUS
admin admin@kubesphere.io Active
```
2. Verify that `ks-controller-manager` is running and check if exceptions are contained in logs:
```bash
kubectl -n kubesphere-system logs -l app=ks-controller-manager
```
Here are some possible reasons for this issue.
### Admission webhooks malfunction in Kubernetes 1.19
Kubernetes 1.19 is compiled with Go 1.15, which requires the certificate used by admission webhooks to contain SANs instead of relying on the legacy Common Name field. This causes the admission webhook of `ks-controller-manager` to fail.
Related error logs:
```bash
Internal error occurred: failed calling webhook "validating-user.kubesphere.io": Post "https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=30s": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0
```
For more information about the issue and solution, see this [GitHub issue](https://github.com/kubesphere/kubesphere/issues/2928).
### ks-controller-manager malfunctions
`ks-controller-manager` relies on two stateful Services: OpenLDAP and Jenkins. When OpenLDAP or Jenkins goes down, `ks-controller-manager` will be in the status of `reconcile`.
Execute the following commands to verify that OpenLDAP and Jenkins are running normally.
```
kubectl -n kubesphere-devops-system get po | grep -v Running
kubectl -n kubesphere-system get po | grep -v Running
kubectl -n kubesphere-system logs -l app=openldap
```
Related error logs:
```bash
failed to connect to ldap service, please check ldap status, error: factory is not able to fill the pool: LDAP Result Code 200 \"Network Error\": dial tcp: lookup openldap.kubesphere-system.svc on 169.254.25.10:53: no such host
```
```bash
Internal error occurred: failed calling webhook “validating-user.kubesphere.io”: Post https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=4s: context deadline exceeded
```
#### Solution
You need to restore OpenLDAP and Jenkins and make sure they have a good network connection. Restarting `ks-controller-manager` triggers `reconcile` immediately; when the connection to OpenLDAP or Jenkins fails, the retry interval increases.
```
kubectl -n kubesphere-system rollout restart deploy ks-controller-manager
```
### Wrong code branch used
If you used the incorrect version of ks-installer, the versions of different components would not match after the installation. Execute the following commands to check version consistency. Note that the correct image tag is `v3.0.0`.
```
kubectl -n kubesphere-system get deploy ks-installer -o jsonpath='{.spec.template.spec.containers[0].image}'
kubectl -n kubesphere-system get deploy ks-apiserver -o jsonpath='{.spec.template.spec.containers[0].image}'
kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spec.template.spec.containers[0].image}'
```
## Wrong Username or Password
![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/wrong-password.png)
`ks-console` and `ks-apiserver` use Redis to share data across multiple copies. When Redis fails, the copies of `ks-console` will not be able to share the salt required for password encryption and transmission.
Run the following command to verify that the account and the password are correct.
```
curl -u <USERNAME>:<PASSWORD> "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes"
```
### Redis failure
Use the following commands to verify that Redis is running normally.
```
kubectl -n kubesphere-system logs -l app=ks-console
kubectl -n kubesphere-system get po | grep -v Running
# High Availability
kubectl -n kubesphere-system exec -it redis-ha-server-0 redis-cli info replication
kubectl -n kubesphere-system exec -it redis-ha-server-0 -- sh -c 'for i in `seq 0 2`; do nc -vz redis-ha-server-$i.redis-ha.kubesphere-system.svc 6379; done'
kubectl -n kubesphere-system logs -l app=redis-ha-haproxy
kubectl -n kubesphere-system logs -l app=redis-ha
# Single Replica
kubectl -n kubesphere-system logs -l app=redis
```
Related error logs:
```bash
1344:C 17 Sep 2020 17:13:18.099 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle
1:M 17 Sep 2020 17:13:18.198 # Background saving error
1:M 17 Sep 2020 17:13:24.014 * 1 changes in 3600 seconds. Saving...
1:M 17 Sep 2020 17:13:24.015 * Background saving started by pid 1345
1345:C 17 Sep 2020 17:13:24.016 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle
1:M 17 Sep 2020 17:13:24.115 # Background saving error
```
```bash
E0909 07:05:22.770468 1 redis.go:51] unable to reach redis host EOF
```
```bash
[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R0 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R1 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R2 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[ALERT] 252/094143 (6) : backend 'check_if_redis_is_master_0' has no server available!
```
#### Solution
You need to restore Redis and make sure it is running normally with good network connection between Pods. After that, restart `ks-console` to synchronize the data across copies.
```
kubectl -n kubesphere-system rollout restart deploy ks-console
```
View File
@ -0,0 +1,40 @@
---
title: "SSH Connection Failure"
keywords: "Installation, SSH, KubeSphere, Kubernetes"
description: "SSH Connection Failure"
linkTitle: "SSH Connection Failure"
Weight: 16600
---
When you use KubeKey to set up a cluster, you create a configuration file which contains necessary host information. Here is an example of the field `hosts`:
```bash
spec:
hosts:
- {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, user: ubuntu, password: Testing123}
- {name: node1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, password: Testing123}
- {name: node2, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123}
```
Before you start to use the `./kk` command to create your cluster, it is recommended that you test the connection between the taskbox and other instances using SSH.
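A minimal connectivity test from the taskbox, using the hosts above as an example, could look like this:

```bash
# Verify that each node is reachable over SSH with the user from the configuration file.
for ip in 192.168.0.2 192.168.0.3 192.168.0.4; do
  ssh -o ConnectTimeout=5 ubuntu@$ip hostname
done
```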
## Possible Error Message
```bash
Failed to connect to xx.xxx.xx.xxx: could not establish connection to xx.xxx.xx.xxx:xx: ssh: handshake failed: ssh: unable to authenticate , attempted methods [none], no supported methods remain node=xx.xxx.xx.xxx
```
If you see an error message as above, verify that:
- You are using the correct port number. Port `22` is the default port of SSH and you need to add the port number after the IP address if your port is different. For example:
```bash
hosts:
- {name: master, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 8022, user: ubuntu, password: Testing123}
```
- SSH connections are not restricted in `/etc/ssh/sshd_config`. For example, `PasswordAuthentication` should be set to `yes` if you log in with a password (see the check after this list).
- You are using the correct username, password or key. Note that the user must have sudo privileges.
- Your firewall configurations allow SSH connections.
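For the second point above, a quick way to inspect the relevant settings on a target host is sketched here:

```bash
# Show the SSH daemon settings that commonly block KubeKey connections.
sudo grep -E '^(Port|PasswordAuthentication|PermitRootLogin|PubkeyAuthentication)' /etc/ssh/sshd_config
# Restart sshd after changing any of them, for example:
# sudo systemctl restart sshd
```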
View File
@ -0,0 +1,141 @@
---
title: "Uninstall Pluggable Components from KubeSphere"
keywords: "Installer, uninstall, KubeSphere, Kubernetes"
description: "How to uninstall pluggable components from KubeSphere"
linkTitle: "Uninstall Pluggable Components from KubeSphere"
Weight: 16500
---
After you [enable pluggable components of KubeSphere](../../../pluggable-components/), you can also uninstall them using the following commands. Please back up any necessary data before you uninstall them.
## App Store
```bash
kubectl delete ns openpitrix-system
```
## Metrics Server
```bash
helm delete metrics-server -n kube-system
```
## Events
```bash
helm delete ks-events -n kubesphere-logging-system
```
## Auditing
```bash
helm delete kube-auditing -n kubesphere-logging-system
```
## Logging
```bash
kubectl delete ns kubesphere-logging-system
```
Optional:
```bash
# Uninstall es and curator
helm uninstall -n kubesphere-logging-system elasticsearch-logging
helm uninstall -n kubesphere-logging-system elasticsearch-logging-curator
# Uninstall fluent bit operator and fluent bit
kubectl delete -f https://github.com/kubesphere/fluentbit-operator/tree/v0.2.0/manifests/logging-stack
kubectl delete -f https://github.com/kubesphere/fluentbit-operator/tree/v0.2.0/manifests/setup
# Uninstall log sidecar injector
helm uninstall -n kubesphere-logging-system logsidecar-injector
```
## Alerting and Notification
```bash
kubectl delete ns kubesphere-alerting-system
```
To uninstall **alerting** only:
```bash
kubectl delete deployment -n kubesphere-alerting-system alerting-client alerting-executor alerting-manager alerting-watcher
kubectl delete svc -n kubesphere-alerting-system alerting-client-server alerting-manager-server
```
To uninstall **notification** only:
```bash
kubectl delete deployment -n kubesphere-alerting-system notification-deployment
kubectl delete svc -n kubesphere-alerting-system notification
```
{{< notice note >}}
Alerting and notification are often enabled at the same time, which run together in the namespace `kubesphere-alerting-system`.
{{</ notice >}}
## Service Mesh
```bash
helm -n istio-system delete istio-init
helm -n istio-system delete istio
helm -n istio-system delete jaeger-operator
kubectl delete ns istio-system
```
## DevOps
```bash
helm -n kubesphere-devops-system delete ks-jenkins
helm -n kubesphere-devops-system delete uc
```
```bash
# Delete DevOps projects
for devopsproject in `kubectl get devopsprojects -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch devopsprojects $devopsproject -p '{"metadata":{"finalizers":null}}' --type=merge
done
for pip in `kubectl get pipeline -A -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch pipeline $pip -n `kubectl get pipeline -A | grep $pip | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
done
for s2ibinaries in `kubectl get s2ibinaries -A -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch s2ibinaries $s2ibinaries -n `kubectl get s2ibinaries -A | grep $s2ibinaries | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
done
for s2ibuilders in `kubectl get s2ibuilders -A -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch s2ibuilders $s2ibuilders -n `kubectl get s2ibuilders -A | grep $s2ibuilders | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
done
for s2ibuildertemplates in `kubectl get s2ibuildertemplates -A -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch s2ibuildertemplates $s2ibuildertemplates -n `kubectl get s2ibuildertemplates -A | grep $s2ibuildertemplates | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
done
for s2iruns in `kubectl get s2iruns -A -o jsonpath="{.items[*].metadata.name}"`
do
kubectl patch s2iruns $s2iruns -n `kubectl get s2iruns -A | grep $s2iruns | awk '{print $1}'` -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete devopsprojects --all 2>/dev/null
```
```bash
kubectl delete ns kubesphere-devops-system
```
{{< notice note >}}
For the component NetworkPolicy, disabling it does not require uninstalling the component as its controller is now inside `ks-controller-manager`. If you want to remove it from the KubeSphere console, change `networkPolicy.enabled` to `false` in `ks-installer`.
{{</ notice >}}
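A hedged sketch of how that change could be made from the command line is shown below; the resource name `ks-installer` and the exact field path may differ across KubeSphere versions, so check your ClusterConfiguration first.

```bash
# Open the ClusterConfiguration used by ks-installer and set the NetworkPolicy switch to false.
kubectl -n kubesphere-system edit clusterconfiguration ks-installer
```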
View File
@ -10,7 +10,7 @@ KubeSphere comes with several pre-installed customized monitoring components inc
## Steps to Bring Your Own Prometheus
To use your own Prometheus stack setup, the steps are listed as below:
To use your own Prometheus stack setup, perform the following steps:
1. Uninstall the customized Prometheus stack of KubeSphere
View File
@ -16,26 +16,25 @@ This tutorial demonstrates how to add new nodes to a single-node cluster. To sca
- You have [downloaded KubeKey](../../../installing-on-linux/introduction/multioverview/#step-2-download-kubekey).
## Add New Nodes
## Add Worker Nodes
### Step 1: Modify host configurations
1. Create a configuration file (`config-sample.yaml`) using KubeKey.
1. Retrieve your cluster information using KubeKey. The command below creates a configuration file (`sample.yaml`).
```bash
# Assume your original Kubernetes cluster is v1.17.9
./kk create config --with-kubesphere v3.0.0 --with-kubernetes v1.17.9
./kk create config --from-cluster
```
{{< notice note >}}
You can skip this step if you already have the configuration file on your machine. For example, if you want to add nodes to a multi-node cluster which was set up by KubeKey, you might still have the configuration file if you have not deleted it.
You can skip this step if you already have the configuration file on your machine. For example, if you want to add nodes to a multi-node cluster which was set up by KubeKey, you might still have the configuration file if you have not deleted it.
{{</ notice >}}
{{</ notice >}}
2. In the configuration file, put the information of your new nodes under `hosts` and `roleGroups`. The example adds two new nodes (i.e. `node1` and `node2`). Here `master1` is the existing node.
```bash
···
spec:
hosts:
- {name: master1, address: 192.168.0.3, internalAddress: 192.168.0.3, user: root, password: Qcloud@123}
@ -51,24 +50,21 @@ This tutorial demonstrates how to add new nodes to a single-node cluster. To sca
- node2
···
```
{{< notice note >}}
- For more information about the configuration file, see [Edit the configuration file](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file).
- You are not allowed to modify the host name of existing nodes when adding new nodes.
- Replace the host name in the example with your own.
{{</ notice >}}
### Step 2: Apply the configuration to add nodes
1. Execute the following command:
- For more information about the configuration file, see [Edit the configuration file](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file).
- You are not allowed to modify the host name of existing nodes when adding new nodes.
- Replace the host name in the example with your own.
{{</ notice >}}
3. Execute the following command:
```bash
./kk add nodes -f config-sample.yaml
./kk add nodes -f sample.yaml
```
2. You will be able to see the new nodes and their information on the KubeSphere console when the installation finishes. On the **Cluster Management** page, select **Cluster Nodes** under **Nodes** from the left menu, or execute the command `kubectl get node` to check the changes.
4. You will be able to see the new nodes and their information on the KubeSphere console when the installation finishes. On the **Cluster Management** page, select **Cluster Nodes** under **Nodes** from the left menu, or execute the command `kubectl get node` to check the changes.
```bash
$ kubectl get node
@ -76,4 +72,84 @@ This tutorial demonstrates how to add new nodes to a single-node cluster. To sca
master1 Ready master,worker 20d v1.17.9
node1 Ready worker 31h v1.17.9
node2 Ready worker 31h v1.17.9
```
## Add Master Nodes for High Availability
The steps of adding master nodes are generally the same as adding worker nodes while you need to configure a load balancer for your cluster. You can use any cloud load balancers or hardware load balancers (e.g. F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating highly available clusters.
1. Create a configuration file using KubeKey.
```
./kk create config --from-cluster
```
2. Open the file and you can see some fields are pre-populated with values. Add the information of new nodes and your load balancer to the file. Here is an example for your reference:
```yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
name: sample
spec:
hosts:
# You should complete the ssh information of the hosts
- {name: master1, address: 172.16.0.2, internalAddress: 172.16.0.2, user: root, password: Testing123}
- {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123}
- {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123}
- {name: worker1, address: 172.16.0.3, internalAddress: 172.16.0.3, user: root, password: Testing123}
- {name: worker2, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123}
- {name: worker3, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123}
roleGroups:
etcd:
- master1
- master2
- master3
master:
- master1
- master2
- master3
worker:
- worker1
- worker2
- worker3
controlPlaneEndpoint:
# If loadbalancer is used, 'address' should be set to loadbalancer's ip.
domain: lb.kubesphere.local
address: 172.16.0.253
port: 6443
kubernetes:
version: v1.17.9
imageRepo: kubesphere
clusterName: cluster.local
proxyMode: ipvs
masqueradeAll: false
maxPods: 110
nodeCidrMaskSize: 24
network:
plugin: calico
kubePodsCIDR: 10.233.64.0/18
kubeServiceCIDR: 10.233.0.0/18
registry:
privateRegistry: ""
```
3. Pay attention to the `controlPlaneEndpoint` field.
```yaml
controlPlaneEndpoint:
# If you use a load balancer, the address should be set to the load balancer's ip.
domain: lb.kubesphere.local
address: 172.16.0.253
port: 6443
```
- The domain name of the load balancer is `lb.kubesphere.local` by default for internal access. You can change it based on your needs.
- In most cases, you need to provide the **private IP address** of the load balancer for the field `address`. However, different cloud providers may have different configurations for load balancers. For example, if you configure a Server Load Balancer (SLB) on Alibaba Cloud, the platform assigns a public IP address to the SLB, which means you need to specify the public IP address for the field `address`.
- The field `port` indicates the port of `api-server`.
4. Save the file and execute the following command to apply the configuration.
```bash
./kk add nodes -f sample.yaml
```
View File
@ -22,8 +22,14 @@ Pods that are part of a DaemonSet tolerate being run on an unschedulable node. D
## Delete a Node
You can delete the node using [KubeKey](https://github.com/kubesphere/kubekey) by the following command. The `config-sample.yaml` file is the one created when you [set up the cluster](../../introduction/multioverview/#1-create-an-example-configuration-file).
1. To delete a node, you need to prepare the configuration file of your cluster first, which is the one created when you [set up your cluster](../../introduction/multioverview/#1-create-an-example-configuration-file). If you do not have it, use [KubeKey](https://github.com/kubesphere/kubekey) to retrieve cluster information (a file `sample.yaml` will be created by default).
```bash
./kk delete node <nodeName> -f config-sample.yaml
```
```bash
./kk create config --from-cluster
```
2. Make sure you provide all the information of your hosts in the configuration file and run the following command to delete a node.
```bash
./kk delete node <nodeName> -f sample.yaml
```
View File
@ -0,0 +1,7 @@
---
linkTitle: "High Availability Configurations"
weight: 3200
_build:
render: false
---
View File
@ -1,9 +1,9 @@
---
title: "High Availability Configurations"
title: "Set up an HA Cluster Using a Load Balancer"
keywords: 'KubeSphere, Kubernetes, HA, high availability, installation, configuration'
description: 'How to configure a high-availability Kubernetes cluster.'
linkTitle: "High Availability Configurations"
weight: 3150
linkTitle: "Set up an HA Cluster Using a Load Balancer"
weight: 3210
---
You can set up a single-master Kubernetes cluster with KubeSphere installed based on the tutorial of [Multi-node Installation](../multioverview/). Single-master clusters may be sufficient for development and testing in most cases. For a production environment, however, you need to consider the high availability of the cluster. If key components (for example, kube-apiserver, kube-scheduler, and kube-controller-manager) are all running on the same master node, Kubernetes and KubeSphere will be unavailable once the master node goes down. Therefore, you need to set up a high-availability cluster by provisioning load balancers with multiple master nodes. You can use any cloud load balancer, or any hardware load balancer (e.g. F5). In addition, Keepalived and [HAproxy](https://www.haproxy.com/), or Nginx is also an alternative for creating high-availability clusters.
@ -14,7 +14,7 @@ This tutorial demonstrates the general configurations of a high-availability clu
Make sure you have prepared six Linux machines before you begin, with three of them serving as master nodes and the other three as worker nodes. The following image shows details of these machines, including their private IP address and role. For more information about system and network requirements, see [Multi-node Installation](../multioverview/#step-1-prepare-linux-hosts).
![ha-architecture](/images/docs/installing-on-linux/introduction/ha-configurations/ha-architecture.png)
![ha-architecture](/images/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-lb/ha-architecture.png)
## Configure a Load Balancer
View File
@ -0,0 +1,413 @@
---
title: "Set up an HA Cluster Using Keepalived and HAproxy"
keywords: 'KubeSphere, Kubernetes, HA, high availability, installation, configuration, Keepalived, HAproxy'
description: 'How to configure a high-availability Kubernetes cluster using Keepalived and HAproxy.'
linkTitle: "Set up an HA Cluster Using Keepalived and HAproxy"
weight: 3220
---
A highly available Kubernetes cluster ensures your applications run without outages, which is required for production. There are plenty of approaches you can choose from to achieve high availability.
This tutorial demonstrates how to configure Keepalived and HAproxy for load balancing and achieve high availability. The steps are listed as below:
1. Prepare hosts.
2. Configure Keepalived and HAproxy.
3. Use KubeKey to set up a Kubernetes cluster and install KubeSphere.
## Cluster Architecture
The example cluster has three master nodes, three worker nodes, two nodes for load balancing and one virtual IP address. The virtual IP address in this example may also be called "a floating IP address". That means in the event of node failures, the IP address can be passed between nodes allowing for failover, thus achieving high availability.
![architecture-ha-k8s-cluster](/images/docs/installing-on-linux/high-availability-configurations/set-up-ha-cluster-using-keepalived-haproxy/architecture-ha-k8s-cluster.png)
Notice that in this example, Keepalived and HAproxy are not installed on any of the master nodes. Admittedly, you can do that and high availability can also be achieved. That said, configuring two specific nodes for load balancing (You can add more nodes of this kind as needed) is more secure. Only Keepalived and HAproxy will be installed on these two nodes, avoiding any potential conflicts with any Kubernetes components and services.
## Prepare Hosts
| IP Address | Hostname | Role |
| ----------- | -------- | -------------------- |
| 172.16.0.2 | lb1 | Keepalived & HAproxy |
| 172.16.0.3 | lb2 | Keepalived & HAproxy |
| 172.16.0.4 | master1 | master, etcd |
| 172.16.0.5 | master2 | master, etcd |
| 172.16.0.6 | master3 | master, etcd |
| 172.16.0.7 | worker1 | worker |
| 172.16.0.8 | worker2 | worker |
| 172.16.0.9 | worker3 | worker |
| 172.16.0.10 | | Virtual IP address |
For more information about requirements for nodes, network, and dependencies, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#step-1-prepare-linux-hosts).
## Configure Load Balancing
[Keepalived](https://www.keepalived.org/) provides a VRRP implementation and allows you to configure Linux machines for load balancing, preventing single points of failure. [HAProxy](http://www.haproxy.org/), providing reliable, high performance load balancing, works perfectly with Keepalived.
As Keepalived and HAproxy are installed on `lb1` and `lb2`, if either one goes down, the virtual IP address (i.e. the floating IP address) will be automatically associated with another node so that the cluster is still functioning well, thus achieving high availability. If you want, you can add more nodes all with Keepalived and HAproxy installed for that purpose.
Run the following command to install Keepalived and HAproxy first.
```bash
yum install keepalived haproxy psmisc -y
```
### HAproxy
1. The configuration of HAproxy is exactly the same on the two machines for load balancing. Run the following command to configure HAproxy.
```bash
vi /etc/haproxy/haproxy.cfg
```
2. Here is an example configuration for your reference (Pay attention to the `server` field. Note that `6443` is the `apiserver` port):
```bash
global
log /dev/log local0 warning
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
stats socket /var/lib/haproxy/stats
defaults
log global
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
frontend kube-apiserver
bind *:6443
mode tcp
option tcplog
default_backend kube-apiserver
backend kube-apiserver
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server kube-apiserver-1 172.16.0.4:6443 check # Replace the IP address with your own.
server kube-apiserver-2 172.16.0.5:6443 check # Replace the IP address with your own.
server kube-apiserver-3 172.16.0.6:6443 check # Replace the IP address with your own.
```
3. Save the file and run the following command to restart HAproxy.
```bash
systemctl restart haproxy
```
4. Make it persist through reboots:
```bash
systemctl enable haproxy
```
5. Make sure you configure HAproxy on the other machine (`lb2`) as well.
### Keepalived
Keepalived must be installed on both machines, while their configurations are slightly different.
1. Run the following command to configure Keepalived.
```bash
vi /etc/keepalived/keepalived.conf
```
2. Here is an example configuration (`lb1`) for your reference:
```bash
global_defs {
notification_email {
}
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_script chk_haproxy {
script "killall -0 haproxy"
interval 2
weight 2
}
vrrp_instance haproxy-vip {
state BACKUP
priority 100
interface eth0 # Network card
virtual_router_id 60
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
unicast_src_ip 172.16.0.2 # The IP address of this machine
unicast_peer {
172.16.0.3 # The IP address of peer machines
}
virtual_ipaddress {
172.16.0.10/24 # The VIP address
}
track_script {
chk_haproxy
}
}
```
{{< notice note >}}
- For the `interface` field, you must provide your own network card information. You can run `ifconfig` on your machine to get the value.
- The IP address provided for `unicast_src_ip` is the IP address of your current machine. For other machines where HAproxy and Keepalived are also installed for load balancing, their IP address must be input for the field `unicast_peer`.
{{</ notice >}}
3. Save the file and run the following command to restart Keepalived.
```bash
systemctl restart keepalived
```
4. Make it persist through reboots:
```bash
systemctl enable keepalived
```
5. Make sure you configure Keepalived on the other machine (`lb2`) as well.
## Verify High Availability
Before you start to create your Kubernetes cluster, make sure you have tested the high availability.
1. On the machine `lb1`, run the following command:
```bash
[root@lb1 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff
inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 73334sec preferred_lft 73334sec
inet 172.16.0.10/24 scope global secondary eth0 # The VIP address
valid_lft forever preferred_lft forever
inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
2. As you can see above, the virtual IP address is successfully added. Simulate a failure on this node:
```bash
systemctl stop haproxy
```
3. Check the floating IP address again and you can see it disappear on `lb1`.
```bash
[root@lb1 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:27:38:c8 brd ff:ff:ff:ff:ff:ff
inet 172.16.0.2/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 72802sec preferred_lft 72802sec
inet6 fe80::510e:f96:98b2:af40/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
4. Theoretically, the virtual IP will be failed over to the other machine (`lb2`) if the configuration is successful. On `lb2`, run the following command and here is the expected output:
```bash
[root@lb2 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 52:54:9e:3f:51:ba brd ff:ff:ff:ff:ff:ff
inet 172.16.0.3/24 brd 172.16.0.255 scope global noprefixroute dynamic eth0
valid_lft 72690sec preferred_lft 72690sec
inet 172.16.0.10/24 scope global secondary eth0 # The VIP address
valid_lft forever preferred_lft forever
inet6 fe80::f67c:bd4f:d6d5:1d9b/64 scope link noprefixroute
valid_lft forever preferred_lft forever
```
5. As you can see above, high availability is successfully configured.
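Optionally, you can restore HAProxy on `lb1` afterwards and check the addresses once more. Whether the VIP moves back to `lb1` depends on your priority and preemption settings, so treat this only as an extra sanity check:
```bash
# On lb1: bring HAProxy back and re-check the interface addresses.
systemctl start haproxy
ip a s eth0
```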
## Use KubeKey to Create a Kubernetes Cluster
[KubeKey](https://github.com/kubesphere/kubekey) is an efficient and convenient tool to create a Kubernetes cluster. Follow the steps below to download KubeKey.
{{< tabs >}}
{{< tab "Good network connections to GitHub/Googleapis" >}}
Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly.
```bash
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
```
{{</ tab >}}
{{< tab "Poor network connections to GitHub/Googleapis" >}}
Run the following command first to make sure you download KubeKey from the correct zone.
```bash
export KKZONE=cn
```
Run the following command to download KubeKey:
```bash
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
```
{{< notice note >}}
After you download KubeKey, if you transfer it to a new machine that also has poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below.
{{</ notice >}}
{{</ tab >}}
{{</ tabs >}}
{{< notice note >}}
The commands above download the latest release (v1.0.1) of KubeKey. You can change the version number in the command to download a specific version.
{{</ notice >}}
Make `kk` executable:
```bash
chmod +x kk
```
Create an example configuration file with default configurations. Here Kubernetes v1.17.9 is used as an example.
```bash
./kk create config --with-kubesphere v3.0.0 --with-kubernetes v1.17.9
```
{{< notice note >}}
- Kubernetes versions that have been fully tested with KubeSphere: v1.15.12, v1.16.13, v1.17.9 (default), and v1.18.6.
- If you do not add the flag `--with-kubesphere` in the command in this step, KubeSphere will not be deployed unless you install it using the `addons` field in the configuration file or add this flag again when you use `./kk create cluster` later.
- If you add the flag `--with-kubesphere` without specifying a KubeSphere version, the latest version of KubeSphere will be installed.
{{</ notice >}}
## Deploy KubeSphere and Kubernetes
After you run the commands above, a configuration file `config-sample.yaml` will be created. Edit the file to add machine information, configure the load balancer and more.
{{< notice note >}}
The file name may be different if you customize it.
{{</ notice >}}
### config-sample.yaml example
```yaml
...
spec:
hosts:
- {name: master1, address: 172.16.0.4, internalAddress: 172.16.0.4, user: root, password: Testing123}
- {name: master2, address: 172.16.0.5, internalAddress: 172.16.0.5, user: root, password: Testing123}
- {name: master3, address: 172.16.0.6, internalAddress: 172.16.0.6, user: root, password: Testing123}
- {name: worker1, address: 172.16.0.7, internalAddress: 172.16.0.7, user: root, password: Testing123}
- {name: worker2, address: 172.16.0.8, internalAddress: 172.16.0.8, user: root, password: Testing123}
- {name: worker3, address: 172.16.0.9, internalAddress: 172.16.0.9, user: root, password: Testing123}
roleGroups:
etcd:
- master1
- master2
- master3
master:
- master1
- master2
- master3
worker:
- worker1
- worker2
- worker3
controlPlaneEndpoint:
domain: lb.kubesphere.local
address: 172.16.0.10 # The VIP address
port: 6443
...
```
{{< notice note >}}
- Replace the value of `controlPlaneEndpoint.address` with your own VIP address.
- For more information about different parameters in this configuration file, see [Multi-node Installation](../../../installing-on-linux/introduction/multioverview/#2-edit-the-configuration-file).
{{</ notice >}}
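Before starting the installation, you may also want to confirm that the machine running KubeKey can reach every host over SSH with the credentials from the configuration file. A minimal sketch, assuming the example addresses above and root SSH access:
```bash
# Quick reachability check for all hosts listed in config-sample.yaml.
for ip in 172.16.0.4 172.16.0.5 172.16.0.6 172.16.0.7 172.16.0.8 172.16.0.9; do
  ssh -o ConnectTimeout=5 root@"$ip" hostname || echo "cannot reach $ip"
done
```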
### Start installation
After you complete the configuration, you can execute the following command to start the installation:
```bash
./kk create cluster -f config-sample.yaml
```
### Verify installation
1. Run the following command to inspect the installation logs.
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
2. When you see the following message, it means your HA cluster is successfully created.
```bash
#####################################################
### Welcome to KubeSphere! ###
#####################################################
Console: http://172.16.0.4:30880
Account: admin
Password: P@88w0rd
NOTES
1. After logging into the console, please check the
monitoring status of service components in
the "Cluster Management". If any service is not
ready, please wait patiently until all components
are ready.
2. Please modify the default password after login.
#####################################################
https://kubesphere.io 2020-xx-xx xx:xx:xx
#####################################################
```
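In addition to the installer logs, you can verify the cluster itself from any master node, assuming `kubectl` has been configured there:
```bash
# All nodes should be Ready and all system Pods should be Running or Completed.
kubectl get nodes -o wide
kubectl get pods --all-namespaces
```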

View File

@ -3,7 +3,7 @@ title: "Air-gapped Installation"
keywords: 'Air-gapped, Installation, KubeSphere'
description: 'How to install KubeSphere in an air-gapped environment.'
linkTitle: "Air-gapped Installation"
weight: 3130
weight: 3140
---
The air-gapped installation is almost the same as the online installation except that you must create a local registry to host Docker images. This tutorial demonstrates how to install KubeSphere and Kubernetes in an air-gapped environment.

View File

@ -15,7 +15,7 @@ Here is a list of available installation options.
- [All-in-one installation](../../../quick-start/all-in-one-on-linux/): Install KubeSphere on a single node. It is only for users to quickly get familiar with KubeSphere.
- [Multi-node installation](../multioverview/): Install KubeSphere on multiple nodes. It is for testing or development.
- [Air-gapped installation on Linux](../air-gapped-installation): All images of KubeSphere have been encapsulated into a package. It is convenient for air-gapped installation on Linux machines.
- [High availability installation](../ha-configuration/): Install a highly-available KubeSphere cluster with multiple nodes which is used for the production environment.
- [High availability installation](../../../installing-on-linux/high-availability-configurations/ha-configuration/): Install a highly-available KubeSphere cluster with multiple nodes which is used for the production environment.
- Minimal Packages: Only install the minimum required system components of KubeSphere. Here is the minimum resource requirement:
- 2 CPUs
- 4GB RAM
@ -40,22 +40,13 @@ If you have an existing Kubernetes cluster, see [Overview of Installing on Kuber
## KubeKey
Developed in Go language, KubeKey represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides users with flexible installation choices, as they can install KubeSphere and Kubernetes separately or install them together, which is convenient and efficient.
There are several scenarios to use KubeKey:
- Install Kubernetes only;
- Install Kubernetes and KubeSphere together in one command;
- Install Kubernetes first, and deploy KubeSphere on it using [ks-installer](https://github.com/kubesphere/ks-installer);
- Scale a cluster;
- Upgrade a cluster;
- Install Kubernetes-related add-ons (Chart or YAML).
[KubeKey](https://github.com/kubesphere/kubekey) provides an efficient approach to the installation and configuration of your cluster. You can use it to create, scale, and upgrade your Kubernetes cluster. It also allows you to install cloud-native add-ons (YAML or Chart) as you set up your cluster. For more information, see [KubeKey](../kubekey).
## Quick Installation for Development and Testing
KubeSphere has decoupled some components since v2.1.0. KubeKey only installs necessary components by default as this way features fast installation and minimal resource consumption. If you want to enable enhanced pluggable functionalities, see [Enable Pluggable Components](../../../pluggable-components/) for details.
The quick installation of KubeSphere is only for development or testing since it uses [Local Volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) based on [openEBS](https://openebs.io/) to provide storage services by default. If you want a production installation, see [High Availability Configurations](../ha-configuration/).
The quick installation of KubeSphere is only for development or testing since it uses [Local Volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) based on [openEBS](https://openebs.io/) to provide storage services by default. If you want a production installation, see [High Availability Configurations](../../../installing-on-linux/high-availability-configurations/ha-configuration/).
## Overview of Pluggable Components

View File

@ -0,0 +1,74 @@
---
title: "KubeKey"
keywords: 'KubeKey, Installation, KubeSphere'
description: 'Understand KubeKey'
linkTitle: "KubeKey"
weight: 3120
---
Developed in Go, [KubeKey](https://github.com/kubesphere/kubekey) represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides you with flexible installation choices, as you can install Kubernetes only or install both Kubernetes and KubeSphere.
There are several scenarios to use KubeKey:
- Install Kubernetes only;
- Install Kubernetes and KubeSphere together in one command;
- Scale a cluster;
- Upgrade a cluster;
- Install Kubernetes-related add-ons (Chart or YAML).
## How Does KubeKey Work
After you download KubeKey, you use an executable called `kk` to perform different operations. Whether you use it to create, scale, or upgrade a cluster, you must prepare a configuration file with `kk` beforehand. This configuration file contains the basic parameters of your cluster, such as host information, network configurations (CNI plugin as well as Pod and Service CIDRs), registry mirrors, add-ons (YAML or Chart), and pluggable component options (if you install KubeSphere). For more information, see [an example configuration file](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md).
With the configuration file in place, you execute the `./kk` command with varied flags for different operations. After that, KubeKey automatically installs Docker and pulls all the necessary images for installation. When the installation is complete, you can also inspect installation logs.
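A typical workflow therefore looks like the following sketch, using the default configuration file name:
```bash
# 1. Generate a configuration file (config-sample.yaml by default).
./kk create config --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
# 2. Edit config-sample.yaml (hosts, roleGroups, addons, ...), then create the cluster.
./kk create cluster -f config-sample.yaml
```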
## Why KubeKey
- The previous ansible-based installer has a bunch of software dependencies such as Python. Developed in Go, KubeKey removes these dependencies so that installation succeeds reliably in a wide variety of environments.
- KubeKey supports multiple installation options, such as [all-in-one installation](../../../quick-start/all-in-one-on-linux/), [multi-node installation](../multioverview/), and [air-gapped installation](../air-gapped-installation/).
- KubeKey uses Kubeadm to install Kubernetes clusters on nodes in parallel as much as possible in order to reduce installation complexity and improve efficiency. It greatly saves installation time compared to the older installer.
- KubeKey aims to install clusters as an object, i.e., CaaO.
## Download KubeKey
{{< tabs >}}
{{< tab "Good network connections to GitHub/Googleapis" >}}
Download KubeKey from its [GitHub Release Page](https://github.com/kubesphere/kubekey/releases) or use the following command directly.
```bash
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
```
{{</ tab >}}
{{< tab "Poor network connections to GitHub/Googleapis" >}}
Run the following command first to make sure you download KubeKey from the correct zone.
```bash
export KKZONE=cn
```
Run the following command to download KubeKey:
```bash
curl -sfL https://get-kk.kubesphere.io | VERSION=v1.0.1 sh -
```
{{< notice note >}}
After you download KubeKey, if you transfer it to a new machine that also has poor network connections to Googleapis, you must run `export KKZONE=cn` again before you proceed with the steps below.
{{</ notice >}}
{{</ tab >}}
{{</ tabs >}}
{{< notice note >}}
The commands above download the latest release (v1.0.1) of KubeKey. You can change the version number in the command to download a specific version.
{{</ notice >}}

View File

@ -3,12 +3,12 @@ title: "Multi-node Installation"
keywords: 'Multi-node, Installation, KubeSphere'
description: 'Explain how to install KubeSphere on multiple nodes'
linkTitle: "Multi-node Installation"
weight: 3120
weight: 3130
---
In a production environment, a single-node cluster cannot satisfy most of the needs as the cluster has limited resources with insufficient compute capabilities. Thus, single-node clusters are not recommended for large-scale data processing. Besides, a cluster of this kind is not available with high availability as it only has one node. On the other hand, a multi-node architecture is the most common and preferred choice in terms of application deployment and distribution.
This section gives you an overview of a single-master multi-node installation, including the concept, [KubeKey](https://github.com/kubesphere/kubekey/) and steps. For information about HA installation, refer to [High Availability Configurations](../ha-configuration/), [Installing on Public Cloud](../../public-cloud/install-kubesphere-on-azure-vms/) and [Installing in On-premises Environment](../../on-premises/install-kubesphere-on-bare-metal/).
This section gives you an overview of a single-master multi-node installation, including the concept, [KubeKey](https://github.com/kubesphere/kubekey/) and steps. For information about HA installation, refer to [High Availability Configurations](../../../installing-on-linux/high-availability-configurations/ha-configuration/), [Installing on Public Cloud](../../public-cloud/install-kubesphere-on-azure-vms/) and [Installing in On-premises Environment](../../on-premises/install-kubesphere-on-bare-metal/).
## Video Demonstration
@ -21,19 +21,6 @@ A multi-node cluster is composed of at least one master node and one worker node
- **Master**. A master node generally hosts the control plane that controls and manages the whole system.
- **Worker**. Worker nodes run the actual applications deployed on them.
## Why KubeKey
If you are not familiar with Kubernetes components, you may find it difficult to set up a highly-functional multi-node Kubernetes cluster. Starting from the version 3.0.0, KubeSphere uses a brand-new installer called KubeKey to replace the old ansible-based installer. Developed in Go language, KubeKey allows users to quickly deploy a multi-node architecture.
For users who do not have an existing Kubernetes cluster, they only need to create a configuration file with few commands and add node information (e.g. IP address and node roles) in it after KubeKey is downloaded. With one command, the installation will start and no additional operation is needed.
### Motivation
- The previous ansible-based installer has a bunch of software dependencies such as Python. KubeKey is developed in Go language to get rid of the problem in a variety of environments, making sure the installation is successful.
- KubeKey uses Kubeadm to install Kubernetes clusters on nodes in parallel as much as possible in order to reduce installation complexity and improve efficiency. It will greatly save installation time compared to the older installer.
- With KubeKey, users can scale clusters from an all-in-one cluster to a multi-node cluster, even an HA cluster.
- KubeKey aims to install clusters as an object, i.e., CaaO.
## Step 1: Prepare Linux Hosts
Please see the requirements for hardware and operating system shown below. To get started with multi-node installation in this demo, you need to prepare at least three hosts according to the following requirements. It is possible to install KubeSphere on two nodes with enough resources planned.
@ -100,7 +87,7 @@ This example includes three hosts as below with the master node serving as the t
## Step 2: Download KubeKey
Follow the step below to download KubeKey.
Follow the step below to download [KubeKey](../kubekey).
{{< tabs >}}

View File

@ -2,7 +2,7 @@
title: "Advantages"
keywords: "KubeSphere, Kubernetes, Advantages"
description: "KubeSphere Advantages"
linkTitle: "Advantages"
weight: 1600
---
@ -10,9 +10,9 @@ weight: 1600
Kubernetes has become the de facto standard for deploying containerized applications at scale in private, public and hybrid cloud environments. However, many people can easily get confused when they start to use Kubernetes as it is complicated and has many additional components to manage. Some components need to be installed and deployed by users themselves, such as storage and network services. At present, Kubernetes only provides open-source solutions or projects, which can be difficult to install, maintain and operate to some extent. For users, it is not always easy to quickly get started as they are faced with a steep learning curve.
KubeSphere is designed to reduce or eliminate many Kubernetes headaches related to building, deployment, management, observability and so on. It provides comprehensive services and automates provisioning, scaling and management of applications so that you can focus on code writing. More specifically, KubeSphere boasts an extensive portfolio of features including multi-cluster management, application lifecycle management, multi-tenant management, CI/CD pipelines, service mesh, and observability (monitoring, logging, alerting, auditing, events and notification).
KubeSphere is designed to reduce or eliminate many Kubernetes headaches related to building, deployment, management, observability and so on. It provides comprehensive services and automates provisioning, scaling and management of applications so that you can focus on code writing. More specifically, KubeSphere boasts an extensive portfolio of features including multi-cluster management, application lifecycle management, multi-tenant management, CI/CD pipelines, service mesh, and observability (monitoring, logging, alerting, notifications, auditing and events).
As a comprehensive open-source platform, KubeSphere strives to make the container platform more user-friendly and powerful. With a highly responsive web console, KubeSphere provides a graphic interface for developing, testing and operating, which can be easily accessed in a browser. For users who are accustomed to command-line tools, they can quickly get familiar with KubeSphere as kubectl is also integrated in the fully-functioning web console. With the responsive UI design, users can create, modify and create their apps and resources with a minimal learning curve.
As a comprehensive open-source platform, KubeSphere strives to make the container platform more user-friendly and powerful. For example, KubeSphere provides a highly interactive web console for test and operation. For users who are accustomed to command-line tools, they can quickly get familiar with KubeSphere as kubectl is integrated in the platform. As such, users can create and modify their resources with the minimal learning curve.
In addition, KubeSphere offers excellent solutions to storage and network. Apart from the major open-source storage solutions such as Ceph RBD and GlusterFS, users are also provided with [QingCloud Block Storage](https://docs.qingcloud.com/product/storage/volume/) and [QingStor NeonSAN](https://docs.qingcloud.com/product/storage/volume/super_high_performance_shared_volume/), developed by QingCloud for persistent storage. With the integrated QingCloud CSI and NeonSAN CSI plugins, enterprises can enjoy a more stable and secure services of their apps and data.
@ -20,73 +20,73 @@ In addition, KubeSphere offers excellent solutions to storage and network. Apart
KubeSphere provides high-performance and scalable container service management for enterprises. It aims to help them accomplish digital transformation driven by cutting-edge technologies, and accelerate app iteration and business delivery to meet the ever-changing needs of enterprises.
Here are the six major advantages that make KubeSphere stand out among its counterparts.
Here are the six major advantages of KubeSphere.
### Unified Management of Clusters across Cloud Providers
### Unified management of clusters across cloud providers
As container usage ramps up, enterprises are faced with increased complexity of cluster management as they deploy clusters across cloud and on-premises environments. To address the urgent need of users for a uniform platform to manage heterogeneous clusters, KubeSphere sees a major feature enhancement with substantial benefits. Users can leverage KubeSphere to manage, monitor, import, operate and retire clusters across regions, clouds and environments.
As container usage ramps up, enterprises are faced with increased complexity of cluster management as they deploy clusters across cloud and on-premises environments. To address the urgent need of users for a uniform platform to manage heterogeneous clusters, KubeSphere sees a major feature enhancement with substantial benefits. Users can leverage KubeSphere to manage, monitor, import and operate clusters across regions, clouds and environments.
The feature can be enabled both before and after the installation, giving users great flexibility as they make their own decisions to use KubeSphere for their specific issues. In particular, it features:
The feature can be enabled both before and after the installation. In particular, it features:
**Unified Management**. Users can import Kubernetes clusters either through direct connection or with an agent. With simple configurations, the process can be done within minutes in the interactive console. Once clusters are imported, users are able to monitor the status and operate on cluster resources in a unified way.
**Unified Management**. Users can import Kubernetes clusters either through direct connection or agent connection. With simple configurations, the process can be done within minutes on the interactive web console. Once clusters are imported, users are able to monitor the status and operate on cluster resources through a central control plane.
**High Availability**. This is extremely useful when it comes to disaster recovery. A cluster can run major services with another one serving as the backup. When the major one goes down, services can be quickly taken over by another cluster. The logic is quite similar to the case when clusters are deployed in different regions, as requests can be sent to the closest one for low latency. In short, high availability is achieved across zones and clusters.
**High Availability**. In the multi-cluster architecture of KubeSphere, a cluster can run major services with another one serving as the backup. When the major one goes down, services can be quickly taken over by another cluster. Besides, when clusters are deployed in different regions, requests can be sent to the closest one for low latency. In this way, high availability is achieved across zones and clusters.
For more information, see [Multi-cluster Management](../../multicluster-management/).
### Powerful Observability
### Powerful observability
The observability feature of KubeSphere has been greatly improved with key building blocks enhanced, including monitoring, logging, auditing, events, alerting and notification. The highly functional system allows users to observe virtually everything that happens in the platform. It has much to offer for users with distinct advantages listed as below:
**Customized**. Users are allowed to customize their own monitoring dashboard with multiple display forms available. They can set their own templates based on their needs, add the metric they want to monitor and even choose the display color they prefer. Alerting policies and rules can all be customized as well, including repetition interval, time and threshold.
**Diversified**. Ops teams are freed from the complicated work of recording massive data as KubeSphere monitors resources from virtually all dimensions. It also features an efficient notification system with diversified channels for users to choose from.
**Diversified**. Ops teams are freed from the complicated work of recording massive data as KubeSphere monitors resources from virtually all dimensions. It also features an efficient notification system with diversified channels for users to choose from, such as email, Slack and WeChat Work. On the back of the multi-tenant system of KubeSphere, different tenants are able to query logs, events and auditing logs which are only accessible to them. Filters, keywords, and fuzzy and exact query are supported.
**Visualized and Interactive**. KubeSphere presents users with a graphic web console, especially for the monitoring of different resources. They are displayed in highly interactive graphs that give users a clear view of what is happening inside a cluster. Resources at different levels can also be sorted based on their usage, which is convenient for users to compare for further data analysis.
**Accurate**. The entire monitoring system functions at second-level precision that allow users to quickly locate any component failures. In terms of events and auditing, all activities are accurately recorded for future reference.
For more information, see Project Administration and Usage.
For more information, see related sections in [Cluster Administration](../../cluster-administration/), [Project User Guide](../../project-user-guide/) and [Toolbox](../../toolbox/).
### Automated DevOps
Automation represents a key part of implementing DevOps. With automatic, streamlined pipelines in place, users are better positioned to distribute apps in terms of continuous delivery and integration.
**Jenkins-powered**. KubeSphere DevOps system is built with Jenkins as the engine, which is abundant in plugins. On top of that, Jenkins provides an enabling environment for extension development, making it possible for the DevOps team to work smoothly across the whole process (developing, testing, building, deploying, monitoring, logging, notifying, etc.) in a unified platform. The KubeSphere account can also be used for the built-in Jenkins, meeting the demand of enterprises for multi-tenant isolation of CI/CD pipelines and unified authentication.
**Jenkins-powered**. The KubeSphere DevOps system is built with Jenkins as the engine, which is abundant in plugins. On top of that, Jenkins provides an enabling environment for extension development, making it possible for the DevOps team to work smoothly across the whole process (developing, testing, building, deploying, monitoring, logging, notifying, etc.) in a unified platform. The KubeSphere account can also be used for the built-in Jenkins, meeting the demand of enterprises for multi-tenant isolation of CI/CD pipelines and unified authentication.
**Convenient built-in tools**. Users can easily take advantage of automation tools (e.g. Binary-to-Image and Source-to-Image) even without a thorough understanding of how Docker or Kubernetes works. They only need to submit a registry address or upload binary files (e.g. JAR/WAR/Binary). Ultimately, services will be released to Kubernetes automatically without any coding in a Dockerfile.
For more information, see DevOps Administration.
For more information, see [DevOps User Guide](../../devops-user-guide/).
### Fine-grained Access Control
### Fine-grained access control
KubeSphere users are allowed to implement fine-grained access control across different levels, including clusters, workspaces and projects. Users with specific roles can operate on different resources if they are authorized to do so.
KubeSphere supports fine-grained access control across different levels, including clusters, workspaces and projects. Users with specific roles can operate on different resources.
**Self-defined**. Apart from system roles, KubeSphere empowers users to define their roles with a spectrum of operations that they can assign to tenants. This meets the need of enterprises for detailed task allocation as they can decide who should be responsible for what while not being affected by irrelevant resources.
**Secure**. As tenants at different levels are completely isolated from each other, they can share resources while not affecting one another. The network can also be completely isolated to ensure data security.
For more information, see Role and Member Management in Workspace.
For more information, see Role and Member Management in [Workspaces](../../workspace-administration/role-and-member-management/) and [Projects](../../project-administration/role-and-member-management/) respectively.
### Out-of-Box Microservices Governance
### Out-of-box microservices governance
On the back of Istio, KubeSphere features major grayscale strategies. All these features are out of the box, which means consistent user experiences without any code hacking. Traffic control, for example, plays an essential role in microservices governance. In this connection, Ops teams, in particular, are able to implement operational patterns (e.g. circuit breaking) to compensate for poorly behaving services. Here are two major reasons why you use microservices governance, or service mesh in KubeSphere:
On the back of Istio, KubeSphere features multiple grayscale strategies. All these features are out of the box, which means consistent user experiences without any code hacking. Here are two major advantages of microservices governance, or service mesh in KubeSphere:
- **Comprehensive**. KubeSphere provides users with a well-diversified portfolio of solutions to traffic management, including canary release, blue-green deployment, traffic mirroring and circuit breaking. In addition, the distributed tracing feature also helps users monitor apps, locate failures, and improve performance.
- **Visualized**. With a highly responsive web console, KubeSphere allows users to view how microservices interconnect with each other in a straightforward way.
- **Comprehensive**. KubeSphere provides users with a well-diversified portfolio of solutions to traffic management, including canary release, blue-green deployment, traffic mirroring and circuit breaking.
- **Visualized**. With a highly interactive web console, KubeSphere allows users to view how microservices interconnect with each other in a straightforward way. This helps users to monitor apps, locate failures, and improve performance.
KubeSphere aims to make service-to-service calls within the microservices architecture reliable and fast. For more information, see Project Administration and Usage.
### Vibrant Open Source Community
### Vibrant open source community
As an open-source project, KubeSphere represents more than just a container platform for app deployment and distribution. We believe that a true open-source model focuses more on sharing, discussions and problem solving with everyone involved. Together with partners, ambassadors and contributors, and other community members, we file issues, submit pull requests, participate in meetups, and exchange ideas of innovation.
As an open-source project, KubeSphere represents more than just a container platform for app deployment and distribution. The KubeSphere team believes that a true open-source model focuses more on sharing, discussions and problem solving with everyone involved. Together with partners, ambassadors and contributors, and other community members, the KubeSphere team files issues, submits pull requests, participates in meetups, and exchanges ideas of innovation.
At KubeSphere, we have the capabilities and technical know-how to help you share the benefits that the open-source model can offer. More importantly, we have community members from around the world who make everything here possible.
The KubeSphere community has the capabilities and technical know-how to help you share the benefits that the open-source model can offer. More importantly, it is home to open-source enthusiasts from around the world who make everything here possible.
**Partners**. KubeSphere partners play a critical role in KubeSphere's go-to-market strategy. They can be app developers, technology companies, cloud providers or go-to-market partners, all of whom drive the community ahead in their respective aspects.
**Ambassadors**. As community representatives, ambassadors promote KubeSphere in a variety of ways (e.g. activities, blogs and user cases) so that more people can join us.
**Ambassadors**. As community representatives, ambassadors promote KubeSphere in a variety of ways (e.g. activities, blogs and user cases) so that more people can join the community.
**Contributors**. KubeSphere contributors help the whole community by contributing to code or documentation. You don't need to be an expert while you can still make a different even it is a minor code fix or language improvement.
**Contributors**. KubeSphere contributors help the whole community by contributing to code or documentation. You don't need to be an expert while you can still make a difference even it is a minor code fix or language improvement.
For more information, see [Partner Program](https://kubesphere.io/partner/) and [Community Governance](https://kubesphere.io/contribution/).

View File

@ -10,6 +10,10 @@ If the kube-apiserver address of the Member Cluster (M Cluster) is accessible on
To use the multi-cluster feature using direct connection, you must have at least two clusters serving as the H Cluster and the M Cluster respectively. A cluster can be defined as the H Cluster or the M Cluster either before or after you install KubeSphere. For more information about installing KubeSphere, refer to [Installing on Linux](../../../installing-on-linux) and [Installing on Kubernetes](../../../installing-on-kubernetes).
## Video Demonstration
{{< youtube i-yWU4izFPo >}}
## Prepare a Host Cluster
A host cluster provides you with the central control plane and you can only define one host cluster.

View File

@ -70,7 +70,7 @@ The following section will get token of serviceaccount `kubesphere` created by K
```bash
TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d)
kubectl config set-credentials kubesphere --token=${TOKEN}
kubectl config set-credentials --current --user=kubesphere
kubectl config set-context --current --user=kubesphere
```
Check the new kubeconfig.

View File

@ -51,7 +51,7 @@ The request and limit of CPU and memory resources all refer to single replica.
| -------------- | ------------------------------------------------------------ | ---------------------------- | ---------------------------- |
| Sub-component | 2 x Prometheus | 3 x Alertmanager | Notification Manager |
| CPU Request | 100 m | 10 m | 100 m |
| CPU Limit | 4 core | | 500 m |
| CPU Limit | 4 cores | | 500 m |
| Memory Request | 400 MiB | 30 MiB | 20 MiB |
| Memory Limit | 8 GiB | | 1 GiB |
| Installation | Required | Required | Required |

View File

@ -0,0 +1,90 @@
---
title: "Disk Log Collection"
keywords: 'KubeSphere, Kubernetes, project, disk, log, collection'
description: 'Disk Log Collection'
linkTitle: "Disk Log Collection"
weight: 13600
---
KubeSphere supports multiple log collection methods so that Ops teams can collect, manage and analyze logs in a unified and flexible way.
This tutorial demonstrates how to collect disk logs for an example app.
## Prerequisites
You need to create a workspace, a project and an account (`project-admin`). The account must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project).
## Enable Disk Log Collection
1. Log in to the web console of KubeSphere as `project-admin` and go to your project.
2. From the left navigation bar, select **Advanced Settings** in **Project Settings**. Under **Disk Log Collection**, enable the feature through the toggle switch.
![enable-disk-log-collection](/images/docs/project-administration/disk-log-collection/enable-disk-log-collection.png)
## Create a Deployment
1. From the left navigation bar, select **Workloads** in **Application Workloads**. Under the **Deployments** tab, click **Create**.
2. In the dialog that appears, set a name for the Deployment (e.g. `demo-deployment`) and click **Next**.
3. Under **Container Image**, click **Add Container Image**.
4. Enter `alpine` in the search bar to use the image (tag: `latest`) as an example.
![alpine-image](/images/docs/project-administration/disk-log-collection/alpine-image.png)
5. Scroll down to **Start Command** and check it. Input the following values for **Run Command** and **Parameters** respectively, click **√**, and then click **Next**.
**Run Command**
```bash
/bin/sh
```
**Parameters**
```bash
-c,if [ ! -d /data/log ];then mkdir -p /data/log;fi; while true; do date >> /data/log/app-test.log; sleep 30;done
```
{{< notice note >}}
The command and parameters above mean that the date information will be exported to `app-test.log` in `/data/log` every 30 seconds.
{{</ notice >}}
![run-command](/images/docs/project-administration/disk-log-collection/run-command.png)
6. On the **Mount Volumes** tab, enable **Disk Log Collection** and click **Add Volume**.
![mount-volumes](/images/docs/project-administration/disk-log-collection/mount-volumes.png)
7. On the **Temporary Volume** tab, input a name for the volume (e.g. `demo-disk-log-collection`) and set the access mode and path. Refer to the image below as an example.
![volume-example](/images/docs/project-administration/disk-log-collection/volume-example.png)
Click **√**, and then click **Next** to continue.
8. Click **Create** in **Advanced Settings** to finish the process.
{{< notice note >}}
For more information, see [Deployments](../../project-user-guide/application-workloads/deployments/).
{{</ notice >}}
## View Logs
1. Under the **Deployments** tab, click the Deployment just created to go to its detail page.
2. In **Resource Status**, click the arrow on the right to view container details, and then click the log icon of `logsidecar-container` (filebeat container) to inspect disk logs.
![container-log](/images/docs/project-administration/disk-log-collection/container-log.png)
![inspect-logs](/images/docs/project-administration/disk-log-collection/inspect-logs.png)
3. Alternatively, you can also use the **Log Search** function from **Toolbox** in the bottom right corner to view stdout logs. For example, use the Pod name of the Deployment for a fuzzy query:
![fuzzy-match](/images/docs/project-administration/disk-log-collection/fuzzy-match.png)

View File

@ -25,7 +25,7 @@ This tutorial demonstrates how to view alerting messages at the workload level.
![alerting_message_workload_level_list](/images/docs/alerting/alerting_message_workload_level_list.png)
2. Select one of the alerting messages to enter the detail page. In **Alerting Detail**, you can see the graph of the memory usage of the monitored workload over time, which has been continuously higher than the threshold of 20 MiB set in the alert rule, so the alert was triggered.
2. Select one of the alerting messages to go to its detail page. In **Alerting Detail**, you can see the graph of the memory usage of the monitored workload over time, which has been continuously higher than the threshold of 20 MiB set in the alert rule, so the alert was triggered.
![alerting_message_workload_level_detail](/images/docs/alerting/alerting_message_workload_level_detail.png)

View File

@ -1,10 +1,114 @@
---
title: "Compose a Microservice App"
keywords: 'kubesphere, kubernetes, docker, devops, service mesh, openpitrix'
description: 'Compose a microservice-based application'
title: "Create a Microservices-based App"
keywords: 'KubeSphere, Kubernetes, service mesh, microservices'
description: 'Create a microservices-based app'
linkTitle: "Create a Microservices-based App"
weight: 10140
---
TBD
With each microservice handling a single part of the app's functionality, an app can be divided into different components. These components have their own responsibilities and limitations, independent from each other. In KubeSphere, this kind of app is called **Composing App**, which can be built through newly created Services or existing Services.
This tutorial demonstrates how to create a microservices-based app Bookinfo, which is composed of four Services, and set a customized domain name to access the app.
## Prerequisites
- You need to create a workspace, a project, and a user account (`project-regular`) for this tutorial. The account needs to be invited to the project with the `operator` role. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/).
- `project-admin` needs to [set the project gateway](../../../project-administration/project-gateway/) so that `project-regular` can define a domain name when creating the app.
## Create Microservices that Compose an App
1. Log in to the web console of KubeSphere and navigate to **Applications** in **Application Workloads** of your project. In the **Composing App** tab, click **Create Composing Application**.
![create-composing-app](/images/docs/project-user-guide/applications/create-a-microservices-based-app/create-composing-app.png)
2. Set a name for the app (e.g. `bookinfo`) and click **Next**.
3. On the **Components** page, you need to create microservices that compose the app. Click **Add Service** and select **Stateless Service**.
4. Set a name for the Service (e.g. `productpage`) and click **Next**.
![product-page](/images/docs/project-user-guide/applications/create-a-microservices-based-app/product-page.png)
{{< notice note >}}
You can create a Service on the dashboard directly or enable **Edit Mode** in the top right corner to edit the YAML file.
{{</ notice >}}
5. Click **Add Container Image** under **Container Image** and input `kubesphere/examples-bookinfo-productpage-v1:1.13.0` in the search bar to use the Docker Hub image.
![container-image](/images/docs/project-user-guide/applications/create-a-microservices-based-app/container-image.png)
{{< notice note >}}
You must press **Enter** on your keyboard after you enter the image name.
{{</ notice >}}
6. Click **Use Default Ports**. For more information about image settings, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/). Click **√** in the bottom right corner and **Next** to continue.
7. On the **Mount Volumes** page, [add a volume](../../../project-user-guide/storage/volumes/) or click **Next** to continue.
8. Click **Add** on the **Advanced Settings** page directly.
9. Similarly, add the other three microservices for the app. Here is the image information:
| Service | Name | Image |
| --------- | --------- | ------------------------------------------------ |
| Stateless | `details` | `kubesphere/examples-bookinfo-details-v1:1.13.0` |
| Stateless | `reviews` | `kubesphere/examples-bookinfo-reviews-v1:1.13.0` |
| Stateless | `ratings` | `kubesphere/examples-bookinfo-ratings-v1:1.13.0` |
10. When you finish adding microservices, click **Next**.
![microservices-done](/images/docs/project-user-guide/applications/create-a-microservices-based-app/microservices-done.png)
11. On the **Internet Access** page, click **Add Route Rule**. In the **Specify Domain** tab, set a domain name for your app (e.g. `demo.bookinfo`) and select `http` in the **Protocol** field. For `Paths`, select the Service `productpage` and port `9080`. Click **OK** to continue.
![route](/images/docs/project-user-guide/applications/create-a-microservices-based-app/route.png)
{{< notice note >}}
The button **Add Route Rule** is not visible if the project gateway is not set.
{{</ notice >}}
12. You can add more rules or click **Create** to finish the process.
13. Wait for your app to reach the **Ready** status.
![status-active](/images/docs/project-user-guide/applications/create-a-microservices-based-app/status-active.png)
## Access the App
1. As you set a domain name for the app, you need to add an entry in the hosts (`/etc/hosts`) file. For example, add the IP address and hostname as below:
```txt
192.168.0.9 demo.bookinfo
```
{{< notice note >}}
You must add your **own** IP address and hostname.
{{</ notice >}}
2. In **Composing App**, click the app you just created.
3. In **Application Components**, click **Click to visit** to access the app.
![click-to-visit](/images/docs/project-user-guide/applications/create-a-microservices-based-app/click-to-visit.png)
![dashboard](/images/docs/project-user-guide/applications/create-a-microservices-based-app/dashboard.png)
{{< notice note >}}
Make sure you open the port in your security group.
{{</ notice >}}
4. Click **Normal user** and **Test user** respectively to see other **Services**.
![review-page](/images/docs/project-user-guide/applications/create-a-microservices-based-app/review-page.png)

View File

@ -99,7 +99,7 @@ Select **Image Registry Secret** for **Type**. To use images from your private r
{{</ notice >}}
4. Click **Create**. Later, the Secret will appear on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](http://localhost:1313/docs/project-user-guide/configuration/secrets/#check-secret-details).
4. Click **Create**. Later, the Secret will appear on the **Secrets** page. For more information about how to edit the Secret after you create it, see [Check Secret Details](../../project-user-guide/configuration/secrets/#check-secret-details).
**Https**

View File

@ -61,7 +61,7 @@ You need to deploy MySQL exporter in `demo` on the same cluster. MySQL exporter
![set-servicemonitor-to-true](/images/docs/project-user-guide/custom-application-monitoring/set-servicemonitor-to-true.jpg)
{{< notice warning >}}
Don't forget to enable the SericeMonitor CRD if you are using external exporter Helm charts. Those charts usually disable ServiceMonitor by default and require manual modification.
Don't forget to enable the ServiceMonitor CRD if you are using external exporter Helm charts. Those charts usually disable ServiceMonitor by default and require manual modification.
{{</ notice >}}
4. Modify MySQL connection parameters. MySQL exporter needs to connect to the target MySQL. In this tutorial, MySQL is installed with the service name `mysql-a8xgvx`. Set `mysql.host` to `mysql-a8xgvx`, `mysql.pass` to `testing`, and `user` to `root` as below. Note that your MySQL service may be created with **a different name**.
@ -93,5 +93,5 @@ After about two minutes, you can create a monitoring dashboard for MySQL and vis
![monitor-mysql-done](/images/docs/project-user-guide/custom-application-monitoring/monitor-mysql-done.jpg)
{{< notice tip >}}
For more information about dashboard strings, see [Visualization](../../../../project-user-guide/custom-application-monitoring/visualization/overview/).
For more information about dashboard properties, see [Visualization](../../../../project-user-guide/custom-application-monitoring/visualization/overview/).
{{</ notice >}}

View File

@ -11,7 +11,7 @@ This section walks you through monitoring a sample web application. The applicat
## Prerequisites
- Please make sure you [enable the OpenPitrix system](../../../../pluggable-components/app-store/).
- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited as the workspace self provisioner with the `self-provisioner` role. Namely, create an account `workspace-self-provisioner` of the `self-provisioner` role, and use this account to create a project (e.g. `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`.
- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited to the workspace with the `self-provisioner` role. Namely, create an account `workspace-self-provisioner` of the `self-provisioner` role, and use this account to create a project (e.g. `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`.
- Knowledge of Helm charts and [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/).
@ -27,7 +27,7 @@ In this tutorial, you use the made-ready image `kubespheredev/promethues-example
### Step 2: Pack the application into a Helm chart
Pack the Deployment, Service, and ServiceMonitor YAML template into a Helm chat for reuse. In the Deployment and Service template, you define the sample web container and the port for the metrics endpoint. ServiceMonitor is a custom resource defined and used by Prometheus Operator. It connects your application and KubeSphere monitoring engine (Prometheus) so that the engine knows where and how to scrape metrics. In future releases, KubeSphere will provide a graphical user interface for easy operation.
Pack the Deployment, Service, and ServiceMonitor YAML template into a Helm chart for reuse. In the Deployment and Service template, you define the sample web container and the port for the metrics endpoint. ServiceMonitor is a custom resource defined and used by Prometheus Operator. It connects your application and KubeSphere monitoring engine (Prometheus) so that the engine knows where and how to scrape metrics. In future releases, KubeSphere will provide a graphical user interface for easy operation.
Find the source code in the folder `helm` in [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app). The Helm chart package is made ready and is named `prometheus-example-app-0.1.0.tgz`. Please download the .tgz file and you will use it in the next step.
@ -53,7 +53,7 @@ Find the source code in the folder `helm` in [kubesphere/prometheus-example-app]
### Step 4: Deploy the sample web application
You need to deploy the sample web application into `demo`. For demonstration purposes, you can simply run a test deployment.
You need to deploy the sample web application into `test`. For demonstration purposes, you can simply run a test deployment.
1. Click `prometheus-example-app`.

View File

@ -134,7 +134,7 @@ In this step, you create a project using the account `project-admin` created in
![click-demo-project](/images/docs/quickstart/create-workspaces-projects-accounts/click-demo-project.png)
4. On the **Overview** page of the project, the project quota remains unset by default. You can click **Set** and specify resource requests and limits based on your needs (e.g. 1 core for CPU and 1000Gi for memory).
4. On the **Overview** page of the project, the project quota remains unset by default. You can click **Set** and specify [resource requests and limits](../../workspace-administration/project-quotas/) as needed (e.g. 1 core for CPU and 1000Gi for memory).
![quota](/images/docs/quickstart/create-workspaces-projects-accounts/quota.png)

View File

@ -34,16 +34,12 @@ Not only did developers from the KubeSphere team participate in the event, our g
We also provided on-site DevOps workshop to discuss and try the most common use features of KubeSphere.
We also provided an on-site DevOps workshop to discuss and try the most common use features of KubeSphere.
![Snip20210201_20](/images/news/meetup-2020/Snip20210201_20.png)
![Snip20210201_22](/images/news/meetup-2020/Snip20210201_22.png)
KubeSphere has been recognized by many users across the world and has developed a diversified and vibrant community. Though so many passionate users could not present at the event in person, they generously shared their experience and expectations for KubeSphere in videos.
KubeSphere has been recognized by many users across the world and has developed a diversified and vibrant community. Though so many passionate users could not present at the event in person, they generously shared their experience and expectations for KubeSphere in videos.
<iframe width="677" height="381" src="https://www.youtube.com/embed/fPE2uloVw8A" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

View File

@ -0,0 +1,268 @@
---
title: 'Do You Really Understand requests and limits in K8s?'
tag: 'Kubernetes,KubeSphere,schedule,monitoring'
createTime: '2021-01-01'
author: '饶云坤'
snapshot: ''
---
When deploying workloads to a K8s cluster, have you often run into the following situations?
1. CPU `requests` is left unset, or set too low, when deploying workloads (so that, on paper, each node can hold more Pods). When business traffic peaks, the node's CPU runs at full load, service latency rises noticeably, and the machine sometimes even slips into a "half-dead" state such as a CPU soft lockup.
2. Similarly, memory `requests` is left unset or set too low, and some Pods then keep failing and restarting. These constantly restarting Pods usually run Java applications, even though the same Java applications run perfectly fine when debugged locally.
3. Cluster load is not distributed evenly across nodes, and the imbalance is usually most visible for memory: some nodes show much higher memory usage than others. As a well-known cloud-native distributed container orchestration system and a de facto standard, shouldn't K8s and its scheduler guarantee an even distribution of resources?
If you run into these problems at peak hours and the machine has already hung or can no longer be reached over SSH, restarting the cluster is usually the only option left to the administrator. If you have experienced similar situations and want to know how to avoid them, or you are a K8s operations developer who wants to get to the bottom of this class of problems, please read on. We first give a qualitative analysis of these problems and the best practices to avoid them; finally, for readers interested in the underlying mechanism of K8s `requests` and `limits`, we take a closer look from the source code angle, so that you not only know what happens but also why.
## Problem Analysis
First, note that CPU and memory are two different kinds of resources. CPU is a compressible resource: its allocation and management are handled by the Linux kernel through the Completely Fair Scheduler (CFS) and the cgroup mechanism. Simply put, if a service in a Pod uses more CPU than the configured CPU `limits`, its CPU is throttled; for Pods without a CPU `limit`, once the node's idle CPU is exhausted, the CPU that was previously available to them gradually shrinks. Either way, the Pod can serve fewer and fewer external requests, which shows up as growing latency and slower responses. This corresponds to situation 1 above.

Memory is an incompressible resource: it cannot be shared between Pods and is fully dedicated, so once it is exhausted or insufficient, new allocations are bound to fail. Some processes reserve a chunk of memory when they initialize; the JVM, for example, requests a block of memory at startup. If the memory `requests` value is smaller than what the JVM asks the system for, the allocation fails (oom-kill) and the Pod keeps failing and restarting. This corresponds to situation 2 above.

As for situation 3: when creating a Pod, K8s has to provision several kinds of resources, CPU and memory included, so the balance it seeks is a combined consideration of all resources rather than memory alone. Moreover, the built-in scheduling algorithm does not simply pick the "node with the fewest allocated resources"; it also takes factors such as Pod affinity into account. Scheduling is based on the `requests` values, and the reason the imbalance you observe is usually in memory is that, for most applications, memory is scarcer than other resources. Finally, K8s scheduling works on the current state of the cluster: when a new Pod needs to be scheduled, the scheduler makes the best decision it can based on the cluster description at that moment. But a Kubernetes cluster is highly dynamic. If a node is drained for maintenance, for instance, all of its Pods are evicted to other nodes, and they do not move back automatically once maintenance is over, because a Pod is never rescheduled after it has been bound to a node.
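For situation 1, CPU throttling can be observed directly from the container's cgroup statistics. A minimal sketch, assuming cgroup v1 and a shell inside the container (the exact mount path may differ on your system):
```bash
# nr_throttled and throttled_time keep growing while the container is hitting
# its CPU limit; on some systems the controller is mounted at cpu,cpuacct.
cat /sys/fs/cgroup/cpu/cpu.stat
```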
## Best Practices
From the analysis above, the stability of the cluster directly determines the stability of the business applications running on it, and temporary resource shortage is the main cause of instability. Once a cluster becomes unstable, application performance degrades at best, and at worst the affected nodes become unavailable. How, then, can cluster stability be improved? On one hand, you can [reserve part of the system resources by editing the Kubelet configuration file](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/), so that the node running the kubelet stays stable when available compute resources run low; this is especially important for incompressible resources such as memory and disk. On the other hand, setting the Pod QoS class properly further improves stability: Pods with different QoS classes have different OOM scores, so when resources run short the cluster kills `Best-Effort` Pods first, then `Burstable` Pods, and `Guaranteed` Pods last. Therefore, if resources allow, you can set all Pods to the `Guaranteed` QoS class, trading compute resources for business performance and stability and for less time and cost spent troubleshooting. If you also want to improve resource utilization, set business-critical services to `Guaranteed` and, depending on importance, set other services to `Burstable` or `Best-Effort`. Below, we take the KubeSphere platform as an example and show how to configure Pod-related resources conveniently and elegantly.
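As a reference for the kubelet-side reservation mentioned above, the relevant KubeletConfiguration fields look roughly like this. The file path and the values are only illustrative, must be sized for your own nodes, and the keys must not already exist in the file:
```bash
# Reserve resources for Kubernetes components and system daemons, and set a
# hard eviction threshold for memory, then restart the kubelet.
# /var/lib/kubelet/config.yaml is the usual location on kubeadm-based nodes.
cat >> /var/lib/kubelet/config.yaml <<'EOF'
kubeReserved:
  cpu: 200m
  memory: 512Mi
systemReserved:
  cpu: 200m
  memory: 512Mi
evictionHard:
  memory.available: "500Mi"
EOF
systemctl restart kubelet
```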
## Resource Configuration Practice in KubeSphere
We have seen that setting the two parameters `requests` and `limits` properly is critical to the stability of the whole cluster. As a K8s distribution, KubeSphere greatly lowers the learning curve of K8s, and with its clean, friendly UI you will find that effective operations can be a surprisingly easy job. Below we demonstrate how to configure resource quotas and limits for containers on the KubeSphere platform.
### Related Concepts
Before the demonstration, let's briefly review the relevant K8s concepts.
#### Introduction to requests and limits
To schedule cluster resources effectively and make full use of them, K8s allocates resources at container granularity with two kinds of constraints: `requests` and `limits`. Every container can set its own `requests` and `limits` independently, through the `resources` field of its containerSpec. Generally speaking, `requests` matters more at scheduling time and `limits` matters more at runtime.
```yaml
resources:
requests:
cpu: 50m
memory: 50Mi
limits:
cpu: 100m
memory: 100Mi
```
`requests`定义了对应容器需要的最小资源量。这句话的含义是,举例来讲,比如对于一个 Spring Boot 业务容器,这里的`requests`必须是容器镜像中 JVM 虚拟机需要占用的最少资源。如果这里把 pod 的内存`requests`指定为 10Mi 显然是不合理的JVM 实际占用的内存 Xms 超出了 K8s 分配给 pod 的内存,导致 pod 内存溢出,从而 K8s 不断重启 pod 。
`limits`定义了这个容器最大可以消耗的资源上限,防止过量消耗资源导致资源短缺甚至宕机。特别的,设置为 0 表示对使用的资源不做限制。值得一提的是,当设置`limits`而没有设置`requests`时Kubernetes 默认令`requests`等于`limits`。
进一步可以把`requests`和`limits`描述的资源分为 2 类:可压缩资源(例如 CPU )和不可压缩资源(例如内存)。合理地设置`limits`参数对于不可压缩资源来讲尤为重要。
前面我们已经知道`requests`参数会最终的 K8s 调度结果起到直接的显而易见的影响。借助于 Linux 内核 Cgroup 机制,`limits`参数实际上是被 K8s 用来约束分配给进程的资源。对于内存参数而言,实际上就是告诉 Linux 内核什么时候相关容器进程可以为了清理空间而被杀死( oom-kill )。
总结一下:
- 对于 CPU如果 pod 中服务使用 CPU 超过设置的`limits`pod 不会被 kill 掉但会被限制。如果没有设置 limits pod 可以使用全部空闲的 CPU 资源。
- 对于内存,当一个 pod 使用内存超过了设置的`limits`pod 中 container 的进程会被 kernel 因 OOM kill 掉。当 container 因为 OOM 被 kill 掉时,系统倾向于在其原所在的机器上重启该 container 或本机或其他重新创建一个 pod。
- 0 <= requests <= Node Allocatablerequests <= limits <= Infinity
#### Pod 的服务质量( QoS
Kubernetes 创建 Pod 时就会给它指定下列一种 QoS 类Guaranteed、Burstable、BestEffort。
- GuaranteedPod 中的每个容器(包含初始化容器)必须指定内存和 CPU 的`requests`和`limits`,并且两者要相等。
- BurstablePod 不符合 Guaranteed QoS 类的标准Pod 中至少一个容器具有内存或 CPU `requests`
- BestEffortPod 中的容器必须没有设置内存和 CPU `requests`或`limits`。
结合节点上 Kubelet 的 CPU 管理策略,可以对指定 pod 进行绑核操作,参见[官方文档](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/)。
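下面是一个 Guaranteed 类型 Pod 的简单示例(名称与数值均为示意)每个容器的 `requests` 与 `limits` 完全相等,且 CPU 为整数个核,配合 Kubelet 的 `static` CPU 管理策略即可实现独占 CPU 核心:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: qos-guaranteed-demo     # 示例名称
spec:
  containers:
    - name: app
      image: nginx:1.20
      resources:
        requests:
          cpu: "2"              # 整数核,满足 static 策略独占 CPU 的条件
          memory: 4Gi
        limits:
          cpu: "2"              # 与 requests 完全相等 => QoS 为 Guaranteed
          memory: 4Gi
```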
### 准备工作
您需要创建一个企业空间、一个项目和一个帐户 ( ws-admin ),务必邀请该帐户到项目中并赋予 admin 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](https://kubesphere.io/zh/docs/quick-start/create-workspace-and-project/)。
### 设置项目配额( Resource Quotas
1. 进入项目基本信息界面,依次点击“项目管理 -> 编辑配额”进入项目的配额设置页面。
![](../../../images/blogs/deep-dive-into-the-K8s-request-and-limit/ksnip_20210122-194612.png)
2. 进入项目配额页面,为该项目分别指定`requests`和`limits`配额。
![](../../../images/blogs/deep-dive-into-the-K8s-request-and-limit/ksnip_20210122-193909.png)
设置项目配额有 2 方面的作用:
- 限定了该项目下所有 pod 指定的`requests`和`limits`之和分别要小于等于这里指定的项目总`requests`和`limits`。
- 如果在项目中创建的任何一个容器没有指定`requests`或者`limits`,那么创建相应的资源时会报错,并会以事件的形式给出报错提示。
可以看到,设定项目配额以后,在该项目中创建任何容器都需要指定`requests`和`limits`,隐含实现了所谓的“code is law”即人人都需要遵守的规则。
> KubeSphere 中的项目配额等价于 K8s 中的 resource quotas ,项目配额除了能够以项目为单位管理 CPU 和内存的使用与分配情况,还能够管理其他类型的资源数目等,详细信息参见[资源配额](https://kubernetes.io/docs/concepts/policy/resource-quotas/)。
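在 K8s 层面,上述项目配额大致对应于类似下面这样的 ResourceQuota 对象(名称与数值均为示意):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota           # 示例名称
  namespace: demo-project       # 对应 KubeSphere 中的项目
spec:
  hard:
    requests.cpu: "4"           # 项目内所有 Pod 的 CPU requests 之和上限
    requests.memory: 8Gi
    limits.cpu: "8"             # 项目内所有 Pod 的 CPU limits 之和上限
    limits.memory: 16Gi
```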
### 设置容器资源的默认请求
上面我们已经讨论过,项目中开启了配额以后,之后创建的 pod 必须明确指定相应的 `requests` 和 `limits` 。事实上,在实际的测试或者生产环境当中,大部分 pod 的 `requests` 和 `limits` 是高度相近甚至完全相同的。那么有没有办法在项目中事先设定好缺省的 `requests` 和 `limits` ,当用户没有指定容器的 `requests` 和 `limits` 时直接应用默认值,而当 pod 已经指定时则直接跳过呢?答案是肯定的。
1. 进入项目基本信息界面,依次点击“项目管理 -> 编辑资源默认请求”进入项目的默认请求设置页面。
![](../../../images/blogs/deep-dive-into-the-K8s-request-and-limit/ksnip_20210122-194745.png)
2. 在弹出的默认请求设置页面中,为该项目分别指定 CPU 和内存的默认值。
![](../../../images/blogs/deep-dive-into-the-K8s-request-and-limit/ksnip_20210122-194358.png)
> KubeSphere 中项目的容器资源默认请求是借助于 K8s 中的 Limit Ranges 实现的,目前 KubeSphere 支持 CPU 和内存的`requests`和`limits`的默认值设定。
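在 K8s 层面,容器资源默认请求大致对应于类似下面这样的 LimitRange 对象(名称与数值均为示意),其中 `defaultRequest` 对应默认的 `requests``default` 对应默认的 `limits`

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: default-limit-range     # 示例名称
  namespace: demo-project
spec:
  limits:
    - type: Container
      defaultRequest:           # 容器未显式指定 requests 时使用的默认值
        cpu: 100m
        memory: 128Mi
      default:                  # 容器未显式指定 limits 时使用的默认值
        cpu: 500m
        memory: 512Mi
```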
前面我们已经了解到,对于一些关键的业务容器,通常其流量和负载相比于其他 pod 都是比较高的,对于这类容器的`requests`和`limits`需要具体问题具体分析。分析的维度是多方面的,例如该业务容器是 CPU 密集型的还是 IO 密集型的,是单点的还是高可用的,这个服务的上游和下游是谁,等等。另一方面,在生产环境中,这类业务容器的负载从一个比较长的时间维度来看往往是具有周期性的,因此业务容器的历史监控数据可以在参数设置方面提供重要的参考价值。而 KubeSphere 在最初的设计中,就已经在架构层面考虑到了这点,将 Prometheus 组件无缝集成到 KubeSphere 平台中,并提供纵向上至集群层级、下至 pod 层级的完整监控体系,横向涵盖 CPU、内存、网络、存储等。一般来说`requests`值可以设定为历史数据的均值,而`limits`要大于历史数据的均值,最终数值还需要结合具体情况做一些小的调整。
## 源码分析
前面我们从日常 K8s 运维出发,描述了由于 `requests` 和 `limits` 参数配置不当而引起的一系列问题,阐述了问题产生的原因并给出了最佳实践。下面我们将深入到 K8s 内部,从代码里表征的逻辑关系来进一步分析和验证上面给出的结论。
### requests 是如何影响 K8s 调度决策的?
我们知道在 K8s 中 pod 是最小的调度单位pod 的`requests`与 pod 内容器的`requests`关系如下:
```golang
func computePodResourceRequest(pod *v1.Pod) *preFilterState {
    result := &preFilterState{}
    for _, container := range pod.Spec.Containers {
        result.Add(container.Resources.Requests)
    }
    // take max_resource(sum_pod, any_init_container)
    for _, container := range pod.Spec.InitContainers {
        result.SetMaxResource(container.Resources.Requests)
    }
    // If Overhead is being utilized, add to the total requests for the pod
    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
        result.Add(pod.Spec.Overhead)
    }
    return result
}
...
func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
    cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
    return nil
}
...
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
    c, err := cycleState.Read(preFilterStateKey)
    if err != nil {
        // preFilterState doesn't exist, likely PreFilter wasn't invoked.
        return nil, fmt.Errorf("error reading %q from cycleState: %v", preFilterStateKey, err)
    }
    s, ok := c.(*preFilterState)
    if !ok {
        return nil, fmt.Errorf("%+v convert to NodeResourcesFit.preFilterState error", c)
    }
    return s, nil
}
...
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
    s, err := getPreFilterState(cycleState)
    if err != nil {
        return framework.NewStatus(framework.Error, err.Error())
    }
    insufficientResources := fitsRequest(s, nodeInfo, f.ignoredResources, f.ignoredResourceGroups)
    if len(insufficientResources) != 0 {
        // We will keep all failure reasons.
        failureReasons := make([]string, 0, len(insufficientResources))
        for _, r := range insufficientResources {
            failureReasons = append(failureReasons, r.Reason)
        }
        return framework.NewStatus(framework.Unschedulable, failureReasons...)
    }
    return nil
}
```
从上面的源码中不难看出,调度器(实际上是 Schedule thread )首先会在 Pre filter 阶段计算出待调度 pod 所需要的资源,具体讲就是从 Pod Spec 中分别计算初始容器和工作容器的`requests`之和,并取其较大者。特别地,对于像 Kata-container 这样的微虚机,其自身的虚拟化开销相比于容器来说是不能忽略的,所以还需要加上虚拟化本身的资源开销。计算出的结果会存入缓存中,在紧接着的 Filter 阶段,会遍历所有节点,过滤出符合条件的节点。
实际上在过滤出所有符合条件的节点以后,如果当前满足条件的节点只有一个,那么该 pod 随后将被调度到该节点。但是更多的情况下,过滤之后符合条件的节点往往有多个,这时候就需要进入 Score 阶段,依次对这些节点进行打分( Score )。而打分本身也是包含多个维度、通过内置 plugin 的形式综合评判的。值得注意的是,前面我们为 pod 定义的`requests`参数也会直接影响到`NodeResourcesLeastAllocated`算法最终的计算结果。源码如下:
```golang
func leastResourceScorer(resToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap, bool, int, int) int64 {
    return func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
        var nodeScore, weightSum int64
        for resource, weight := range resToWeightMap {
            resourceScore := leastRequestedScore(requested[resource], allocable[resource])
            nodeScore += resourceScore * weight
            weightSum += weight
        }
        return nodeScore / weightSum
    }
}
...
func leastRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 {
        return 0
    }
    if requested > capacity {
        return 0
    }
    return ((capacity - requested) * int64(framework.MaxNodeScore)) / capacity
}
```
可以看到在`NodeResourcesLeastAllocated`算法中,对于同一个 pod ,目标节点的资源越充裕,该节点的得分也就越高。换句话说,同一个 pod 更倾向于调度到资源充足的节点。需要注意的是,实际上在创建 pod 的过程中,一方面, K8s 需要拨备包含 CPU 和内存在内的多种资源,每种资源都会对应一个权重(对应源码中的 resToWeightMap 数据结构),所以这里的资源均衡是包含 CPU 和内存在内的所有资源的综合考量。另一方面,在 Score 阶段,除了`NodeResourcesLeastAllocated`算法以外,调度器还会使用其他算法(例如`InterPodAffinity`)进行分数的评定。
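举一个简化的例子(数值均为假设,且假设各资源权重相同):待调度 pod 的 CPU `requests` 合计为 1 核,节点 A 可分配 CPU 为 4 核、已被占用( requests 之和)1 核,节点 B 同样可分配 4 核、已被占用 3 核。按照 `leastRequestedScore` 的公式 `(capacity - requested) * MaxNodeScore / capacity`(其中 requested 已包含待调度 pod 自身的请求),节点 A 的 CPU 得分为 (4 - 2) × 100 / 4 = 50节点 B 为 (4 - 4) × 100 / 4 = 0因此该 pod 更倾向于被调度到节点 A 。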
> 注:在 K8s 调度器中,调度过程被分为若干个阶段,即 Pre filter、Filter、Post filter、Score 等。其中 Filter 阶段用于过滤出符合 Pod Spec 描述的节点。
### QoS 是如何影响 K8s 调度决策的?
QoS 作为 K8s 中的一种资源保护机制,主要是针对内存等不可压缩资源的一种控制技术。以内存为例,它通过为不同的 pod 和容器设置不同的 OOM 评分,并借助内核相应策略的辅助,实现当节点内存资源不足时,内核可以按照优先级(分值越高优先级越低)优先 kill 掉优先级较低的 pod 。相关源码如下:
```golang
func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
    if types.IsCriticalPod(pod) {
        // Critical pods should be the last to get killed.
        return guaranteedOOMScoreAdj
    }

    switch v1qos.GetPodQOS(pod) {
    case v1.PodQOSGuaranteed:
        // Guaranteed containers should be the last to get killed.
        return guaranteedOOMScoreAdj
    case v1.PodQOSBestEffort:
        return besteffortOOMScoreAdj
    }

    // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
    // we want to protect Burstable containers that consume less memory than requested.
    // The formula below is a heuristic. A container requesting for 10% of a system's
    // memory will have an OOM score adjust of 900. If a process in container Y
    // uses over 10% of memory, its OOM score will be 1000. The idea is that containers
    // which use more than their request will have an OOM score of 1000 and will be prime
    // targets for OOM kills.
    // Note that this is a heuristic, it won't work if a container has many small processes.
    memoryRequest := container.Resources.Requests.Memory().Value()
    oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
    // A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure
    // that burstable pods have a higher OOM score adjustment.
    if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) {
        return (1000 + guaranteedOOMScoreAdj)
    }
    // Give burstable pods a higher chance of survival over besteffort pods.
    if int(oomScoreAdjust) == besteffortOOMScoreAdj {
        return int(oomScoreAdjust - 1)
    }
    return int(oomScoreAdjust)
}
```
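以一个简化的例子来说明(数值为假设):假设节点可分配内存为 10Gi某个 Burstable 容器的内存 `requests` 为 1Gi则其 oomScoreAdjust ≈ 1000 - 1000 × 1 / 10 = 900而在当前实现中 Guaranteed 容器对应的 guaranteedOOMScoreAdj 为 -998BestEffort 容器对应的 besteffortOOMScoreAdj 为 1000。当节点内存不足触发 OOM 时,分值越高的进程越先被内核杀死,因此 BestEffort 以及实际用量超过 `requests` 的 Burstable Pod 会被优先回收。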
## 总结
Kubernetes 作为一个具有良好移植性和扩展性的开源平台,用于管理容器化的工作负载和服务。 Kubernetes 拥有一个庞大且快速增长的生态系统,已成为容器编排领域的事实标准,但也不可避免地引入了许多复杂性。而 KubeSphere 作为国内唯一一个开源的 KubernetesK8s发行版极大地降低了使用 Kubernetes 的门槛。借助于 KubeSphere 平台,原先需要通过后台命令行和 yaml 文件管理的系统配置,现在只需要在简洁美观的 UI 界面上即可轻松完成。本文从云原生应用部署阶段`requests`和`limits`的设置问题切入,分析了相关 K8s 底层的工作原理,以及如何通过 KubeSphere 平台简化相关的运维工作。
## 参考文献
- https://learnk8s.io/setting-cpu-memory-limits-requests
- https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
- https://docs.oracle.com/cd/E13150_01/jrockit_jvm/jrockit/jrdocs/refman/optionX.html
- https://kubesphere.com.cn/forum/d/1155-k8s
- https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/
- https://kubernetes.io/docs/concepts/policy/limit-range/
- https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
- https://www.kernel.org/doc/Documentation/scheduler/sched-design-CFS.txt
- https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu
- https://medium.com/omio-engineering/cpu-limits-and-aggressive-throttling-in-kubernetes-c5b20bd8a718

View File

@ -0,0 +1,118 @@
---
title: '除夕夜,来 KubeSphere 社区领证吧!'
tag: 'KubeSphere,社区'
keyword: '社区, 开源, 贡献, KubeSphere'
description: 'KubeSphere 社区管理委员会向此次获得 2020 年度评选产生的 KubeSphere Member、 KubeSphere Ambassador、KubeSphere Talented Speaker、KubeSphere Contributor 成员致谢。'
createTime: '2021-02-11'
author: '周鹏飞'
snapshot: 'https://pek3b.qingstor.com/kubesphere-community/images/20210211222956.png'
---
<video controls="controls" style="width: 100% !important; height: auto !important;">
<source type="video/mp4" src="https://kubesphere-community.pek3b.qingstor.com/videos/meetup%20final%200131.mp4">
</video>
除夕夜辞旧迎新,感谢 KubeSphere 社区所有的贡献者与合作伙伴在过去一年来任何形式的合作与贡献。
我们非常庆幸 KubeSphere 社区到二月份总计有了近百位贡献者加入,全年诞生了 10 位 Ambassador15 位 Talented Speaker22 位优秀贡献者,还有 6 位具有突出贡献的活跃贡献者实力入选 KubeSphere Member均由 KubeSphere 社区技术委员会与 KubeSphere 指导委员会投票通过。
## 公开致谢
KubeSphere 社区管理委员会向此次获得 2020 年度评选产生的 KubeSphere Member、 KubeSphere Ambassador、KubeSphere Talented Speaker、KubeSphere Contributor 成员致谢,并向所有参与过 KubeSphere 社区开源贡献的小伙伴致以 2021 春节最诚挚的问候,社区有一份专属的礼物等你来领!
![合影](https://pek3b.qingstor.com/kubesphere-community/images/20210211222956.png)
## KubeSphere Member
KubeSphere Member 是授予给深度参与了 KubeSphere 社区开源贡献的成员,以下成员在社区独立完成了一个或多个功能特性开发、文档撰写以及测试,并帮助 KubeSphere 在多个社区积极布道,推广开源技术,目前已全部邀请加入了 KubeSphere 的 Github 组织。
| 姓名 | 证书 |
| ---- | ---- |
|Jie Chen|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-chenjie.png) |
|Ling Li|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-liling.png) |
|Honglei Shen|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-shenhonglei.png) |
|Turtle Chang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-turtlechang.png) |
|Jian Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-zhagnjian.png) |
|Kai Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/member-zhangkai.png) |
## KubeSphere Talented Speaker
KubeSphere Talented Speaker 是授予在 KubeSphere 社区云原生技术直播活动、 2020 年度北京 Meetup、CNCF Webinar 上进行过公开技术分享的优秀讲师,他们是帮助 KubeSphere 社区积极布道的技术传播者,具备出色的技术演讲能力。
| 姓名 | 证书 |
| ---- | ---- |
|Benjamin Huo|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-benjaminhuo.png) |
|Tian Fang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-fangtian.png) |
|Leo Liu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-leoliu.png) |
|Lei Song|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-songlei.png) |
|Juan Xu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-xujuan.png)|
|Zhanling Chen|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-chenzhanling.png) |
|Jeff Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-jeffzhang.png) |
|Yueliang Liu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-liuyueliang.png) |
|Tally Lee|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-tallylee.png) |
|Xiaofei Yang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-yangxiaofei.png) |
|Jiong Duan|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-duanjiong.png) |
|Wanjun Lei|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-leiwanjun.png) |
|Michael Yuan|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-michaelyuan.png) |
|Wenhu Wang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-wangwenhu.png) |
|Ke Zhou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/speaker-zhouke.png) |
## KubeSphere Ambassador
KubeSphere Ambassador 是授予帮助 KubeSphere 社区积极布道的技术传播者。以下成员在 KubeSphere 社区多次分享过 KubeSphere 落地实践案例与技术文章,帮助更多用户熟悉 KubeSphere 的应用场景与最佳实践。
| 姓名 | 证书 |
| ---- | ---- |
|Guoyou Li|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-liguoyou.png) |
|Pahud|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-pahud.png) |
|Hengjun Wei|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-weihengjun.png) |
|Lei Xue|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-xuelei.png) |
|Yang Yang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-yangyang.png)|
|Xingmin Lu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-luxingmin.png) |
|Haitao Pan|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-panhaitao.png) |
|Will Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-willzhang.png) |
|Xinglin Xue|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-xuexinglin.png) |
|Haili Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/ambassador-zhanghaili.png) |
## KubeSphere Contributor
KubeSphere Contributor 是授予 2020 年参与过 KubeSphere 开源贡献的成员,或在 KubeSphere 中文论坛发布过优质的技术博客,也包含了帮助社区提交与修复 Bug 的成员。
| 姓名 | 证书 |
| ---- | ---- |
|Ader Fu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-aderfu.png) |
|Howie Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-howiezhang.png) |
|Wenbin Liao|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-liaowenbin.png) |
|Renee Teng|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-reneeteng.png) |
|Hui Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-zhanghui.png)|
|Changjie Fu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-fuchangjie.png) |
|Tao Hu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-hutao.png) |
|Ruichao Lin|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-linruichao.png) |
|The Way|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-theway.png) |
|Wei Zhang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-zhangwei.png) |
|Huihui Fu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-fuhuihui.png)|
|Jack Zhou|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-jackzhou.png)|
|Gang Liu|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-liugang.png)|
|Wei Wang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-wangwei.png) |
|Shiwen Gong|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-gongshiwen.png) |
|Chen Jin|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-jinchen.png) |
|Junjie Ma|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-majunjie.png) |
|Zehuai Wang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-wangzehuai.png) |
|Shanjie Han|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-hanshanjie.png) |
|Yuanpeng Liang|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-liangyuanpeng.png) |
|Hongbing Pei|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-peihongbing.png) |
|Xinyang Xie|[点击查看下载证书](https://kubesphere-community.pek3b.qingstor.com/images/certificates/contributor-xiexinyang.png) |
还有很多默默在 GitHub 提交过 Pull Request 的来自全球各地的社区贡献者,我们尚且还不知道这些 Contributor 的名字或联系方式,社区已在项目仓库 Readme 公开致谢。我们给所有参与过 KubeSphere 开源贡献的成员都送上了一份定制 Logo 的纪念周边,如果你正好也给社区提交过 Pull Request 并被社区合并,但没有出现在上述公开名单中,请微信联系助手小 KK `kubesphere`,我们将为您定制 KubeSphere 证书与纪念周边。
## KubeSphere 社区合作伙伴
除了上述列出的社区贡献成员KubeSphere 社区还特别致谢来自 AWS Community、Apache APISIX、Apache SkyWalking、DockOne 社区、K8s 中文社区、锐捷网络、WeDataSphere 开源社区微众银行、航天网信、Jenkins 中文社区、TesterHome、SegmentFault、云原生社区、Istio Community、KubeEdge 社区、openEuler 社区、星火网校、中通快递、掌门教育的开发者,以及所有社区合作伙伴。
![](https://ap3.qingstor.com/kubesphere-website/docs/20201229150707.png)
## 展望新年
2020 是艰难的一年,展望 2021 年,借用我个人非常喜欢的毛爷爷写的一首诗句:“雄关漫道真如铁,而今迈步从头越”,相信大家已经为新的挑战做好了准备。
社区讲究价值认同,所以社区能把一群具有共识的人聚集在一起,当一群拥有共同目标的人组成了一个社区朝着同一个方向发力,没有什么事情是做不成的。开源靠大家,新的一年,让我们一起为 KubeSphere 社区贡献自己的智慧源泉吧!

View File

@ -5,47 +5,47 @@ layout: "scenario"
css: "scss/scenario.scss"
section1:
title: KubeSphere DevOps offers end-to-end workflow and integrates popular CI/CD tools to boost delivery.
content: KubeSphere DevOps provides CI/CD pipeline based on Jenkins, and offers automated workflows including binary-to-image (B2I) and source-to-image (S2I), helps organizations accelerate time to market for their product.
title: KubeSphere DevOps 提供端到端的工作流,集成主流 CI/CD 工具,提升交付能力
content: KubeSphere DevOps 提供基于 Jenkins 的 CI/CD 流水线,支持自动化工作流,包括 Binary-to-Image (B2I) 和 Source-to-Image (S2I) 等,帮助不同的组织加快产品上市时间。
image: /images/devops/banner.jpg
image: /images/devops/dev-ops.png
section2:
title: Automatically Checkout Code, Test, Analyse, Build, Deploy and Release
title: 自动检出 (Checkout) 代码、测试、分析、构建、部署并发布
list:
- title: Out-of-box CI/CD Pipeline
- title: 开箱即用的 CI/CD 流水线
image: /images/devops/CD-pipeline.png
contentList:
- content: <span>Easy to integrate with your SCM,</span> supporting GitLab / GitHub / BitBucket / SVN
- content: <span>Design a graphical editing panel</span> to create CI/CD pipelines, without writing Jenkinsfile
- content: <span>Integrate SonarQube</span> to implement source code quality analysis
- content: <span>Support dependency cache</span> to accelerate build and deployment
- content: <span>Provide dynamic build agents</span> to automatically spin up Pods as necessary
- content: <span>易于集成至您的 SCM</span>支持 GitLab/GitHub/BitBucket/SVN
- content: <span>图形编辑面板设计,</span>可创建 CI/CD 流水线且无需编写 Jenkinsfile
- content: <span>集成 SonarQube</span>实现源代码质量分析
- content: <span>支持依赖项缓存,</span>加快构建和部署
- content: <span>动态构建 Agent</span>根据需要自动创建 Pod
- title: Built-in Automated Toolkits
- title: 内置自动化工具箱
image: /images/devops/Built-in-automated-toolkits.png
contentList:
- content: <span>Source to Image</span> builds reproducible container images from source code without writing dockerfile
- content: <span>Binary-to-image</span> is the bridge between your artifact and a runnable image
- content: <span>Support automatically building and pushing</span> images to any registry, and finally deploy them to Kubernetes
- content: <span>Provide excellent recoverability and flexibility</span> as you can rebuild and rerun S2I / B2I whenever a patch is needed
- content: <span>Source-to-Image</span> 从源代码构建可再现容器镜像,无需编写 Dockerfile
- content: <span>Binary-to-image</span> 将您的制品自动构建成可运行镜像
- content: <span>支持自动化构建和推送</span>镜像至任意仓库,并最终部署至 Kubernetes
- content: <span>卓越的可恢复性和灵活性,</span>您可以在需要补丁时重新构建并重新运行 S2I/B2I
- title: Use GitOps to implement DevOps, not just culture
- title: 使用 GitOps 实现 DevOps
image: /images/devops/Clear-insight.png
contentList:
- content: <span>Combine Git with Kubernetes convergence, and automates the cloud native Apps delivery</span>
- content: <span>Designed for teams, offer built-in multitenancy in DevOps project</span>
- content: <span>Liable to be observable,</span> provide dynamic logs for the S2I / B2I build and pipeline
- content: Provide audit, alert and notification in pipeline, ensuring issues can be quickly located and solved
- content: Support adding Git SCM webhooks to trigger a Jenkins build when new commits are submitted to the branch
- content: <span>融合 Git 和 Kubernetes实现云原生应用自动化交付</span>
- content: <span>基于 KubeSphere 多租户体系,为 DevOps 工程团队打造合作平台</span>
- content: <span>易于观察,</span>为 S2I/B2I 构建以及流水线提供动态日志
- content: 在流水线中提供审计、告警和通知功能,确保快速定位并解决问题
- content: 支持添加 Git SCM Webhook在提交新的 Commit 到分支时触发 Jenkins 构建
section3:
title: See KubeSphere One-stop DevOps Workflow In Action
title: 观看 KubeSphere 一站式 DevOps 工作流操作演示
videoLink: https://www.youtube.com/embed/c3V-2RX9yGY
image: /images/service-mesh/15.jpg
content: Want to get started in action by following the hands-on lab?
btnContent: Start Hands-on Lab
content: 想自己动手体验实际操作?
btnContent: 开始动手实验
link: https://kubesphere.com.cn/docs/pluggable-components/devops/
bgLeft: /images/service-mesh/3-2.svg
bgRight: /images/service-mesh/3.svg

View File

@ -1,5 +1,5 @@
---
linkTitle: "External Applications"
linkTitle: "外部应用"
weight: 14300
_build:

View File

@ -0,0 +1,205 @@
---
title: "在 KubeSphere 中部署 TiDB Operator 和 TiDB 集群"
keywords: 'KubeSphere, Kubernetes, TiDB, TiDB Operator, TiDB Cluster'
description: '如何在 KubeSphere 中部署 TiDB Operator 和 TiDB 集群'
linkTitle: "部署 TiDB Operator 和 TiDB 集群"
weight: 14320
---
[TiDB](https://en.pingcap.com/) is a cloud-native, open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads. It features horizontal scalability, strong consistency, and high availability.
This tutorial demonstrates how to deploy TiDB Operator and a TiDB Cluster on KubeSphere.
## Prerequisites
- You need to enable [the OpenPitrix system](../../../pluggable-components/app-store/).
- You need to create a workspace, a project, and two user accounts (`ws-admin` and `project-regular`) for this tutorial. The account `ws-admin` must be granted the role of `workspace-admin` in the workspace, and the account `project-regular` must be invited to the project with the role of `operator`. If they are not ready, refer to [Create Workspaces, Projects, Accounts and Roles](../../../quick-start/create-workspace-and-project/).
## Hands-on Lab
### Step 1: Install TiDB Operator CRD
1. Log in to KubeSphere Web console as `admin`, and use **Kubectl** from the **Toolbox** in the bottom right corner to execute the following command to install TiDB Operator CRD:
```bash
kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/manifests/crd.yaml
```
2. You can see the expected output as below:
```bash
customresourcedefinition.apiextensions.k8s.io/tidbclusters.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backups.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/restores.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/backupschedules.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbmonitors.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbinitializers.pingcap.com created
customresourcedefinition.apiextensions.k8s.io/tidbclusterautoscalers.pingcap.com created
```
### Step 2: Add an app repository
1. Log out of KubeSphere and log back in as `ws-admin`. In your workspace, go to **App Repos** under **Apps Management**, and then click **Add Repo**.
![add-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-repo.PNG)
2. In the dialog that appears, enter `pingcap` for the app repository name and `https://charts.pingcap.org` for the PingCAP Helm repository URL. Click **Validate** to verify the URL and you will see a green check mark next to the URL if it is available. Click **OK** to continue.
![add-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/add-pingcap-repo.PNG)
3. Your repository displays in the list after successfully imported to KubeSphere.
![added-pingcap-repo](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/added-pingcap-repo.PNG)
### Step 3: Deploy TiDB Operator
1. Log out of KubeSphere and log back in as `project-regular`. In your project, go to **Applications** under **Application Workloads** and click **Deploy New Application**.
![deploy-app](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app.PNG)
2. In the dialog that appears, select **From App Templates**.
![from-app-templates](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates.PNG)
3. Select `pingcap` from the drop-down list, then click **tidb-operator**.
![click-tidb-operator](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-operator.PNG)
{{< notice note >}}
This tutorial only demonstrates how to deploy TiDB Operator and a TiDB cluster. You can also deploy other tools based on your needs.
{{</ notice >}}
4. On the **Chart Files** tab, you can view the configuration from the console directly or download the default `values.yaml` file by clicking the icon in the upper right corner. Under **Versions**, select a version number from the drop-down list and click **Deploy**.
![select-version](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/select-version.PNG)
5. On the **Basic Info** page, confirm the app name, app version, and deployment location. Click **Next** to continue.
![basic-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/basic-info.PNG)
6. On the **App Config** page, you can either edit the `values.yaml` file, or click **Deploy** directly with the default configurations.
![check-config-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/check-config-file.PNG)
7. Wait for TiDB Operator to be up and running.
![tidb-operator-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-operator-running.PNG)
8. Go to **Workloads**, and you can see two Deployments created for TiDB Operator.
![tidb-deployment](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-deployment.PNG)
### Step 4: Deploy a TiDB cluster
The process of deploying a TiDB cluster is similar to deploying TiDB Operator.
1. Go to **Applications** under **Application Workloads**, click **Deploy New Application** again, and then select **From App Templates**.
![deploy-app-again](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/deploy-app-again.PNG)
![from-app-templates-2](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/from-app-templates-2.PNG)
2. From the PingCAP repository, click **tidb-cluster**.
![click-tidb-cluster](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/click-tidb-cluster.PNG)
3. On the **Chart Files** tab, you can view the configuration and download the `values.yaml` file. Click **Deploy** to continue.
![download-yaml-file](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/download-yaml-file.PNG)
4. On the **Basic Info** page, confirm the app name, app version, and deployment location. Click **Next** to continue.
![tidb-cluster-info](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-info.PNG)
5. Some TiDB components require [persistent volumes](../../../cluster-administration/persistent-volume-and-storage-class/). You can run the following command to view your storage classes.
```
/ # kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
csi-high-capacity-legacy csi-qingcloud Delete Immediate true 71m
csi-high-perf csi-qingcloud Delete Immediate true 71m
csi-ssd-enterprise csi-qingcloud Delete Immediate true 71m
csi-standard (default) csi-qingcloud Delete Immediate true 71m
csi-super-high-perf csi-qingcloud Delete Immediate true 71m
```
6. On the **App Config** page, change the default value of the field `storageClassName` from `local-storage` to the name of your storage class. For example, you can change it to `csi-qingcloud` based on the above output.
![tidb-cluster-config](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-config.PNG)
{{< notice note >}}
Only the field `storageClassName` is changed to provide external persistent storage. If you want to deploy each TiDB component, such as [TiKV](https://docs.pingcap.com/tidb/dev/tidb-architecture#tikv-server) and [Placement Driver](https://docs.pingcap.com/tidb/dev/tidb-architecture#placement-driver-pd-server), to individual nodes, specify the field `nodeAffinity`.
{{</ notice >}}
7. Click **Deploy** and you can see two apps in the list as shown below:
![tidb-cluster-app-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-app-running.PNG)
### Step 5: View TiDB cluster status
1. Go to **Workloads** under **Application Workloads**, and verify that all TiDB cluster Deployments are up and running.
![tidb-cluster-deployments-running](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-cluster-deployments-running.PNG)
2. Switch to the **StatefulSets** tab, and you can see TiDB, TiKV and PD are up and running.
![tidb-statefulsets](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-statefulsets.PNG)
{{< notice note >}}
TiKV and TiDB will be created automatically and it may take a while before they display in the list.
{{</ notice >}}
3. Click a single StatefulSet to go to its detail page. You can see the metrics in line charts over a period of time under the **Monitoring** tab.
TiDB metrics:
![tidb-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-metrics.PNG)
TiKV metrics:
![tikv-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-metrics.PNG)
PD metrics:
![pd-metrics](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/pd-metrics.PNG)
4. In **Pods** under **Application Workloads**, you can see the TiDB cluster contains two TiDB Pods, three TiKV Pods, and three PD Pods.
![tidb-pod-list](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-pod-list.PNG)
5. In **Volumes** under **Storage**, you can see TiKV and PD are using persistent volumes.
![tidb-storage-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-storage-usage.PNG)
6. Volume usage is also monitored. Click a volume item to go to its detail page. Here is an example of TiKV:
![tikv-volume-status](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tikv-volume-status.PNG)
7. On the **Overview** page of the project, you can see a list of resource usage in the current project.
![tidb-project-resource-usage](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-project-resource-usage.PNG)
### Step 6: Access the TiDB cluster
1. Go to **Services** under **Application Workloads**, and you can see detailed information of all Services. As the Service type is set to `NodePort` by default, you can access it through the Node IP address outside the cluster.
![tidb-service](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service.PNG)
2. TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. For example, you can access Grafana through `{$NodeIP}:{Nodeport}` to view metrics.
![tidb-service-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-service-grafana.PNG)
![tidb-grafana](/images/docs/appstore/external-apps/deploy-tidb-operator-and-cluster/tidb-grafana.PNG)
{{< notice note >}}
You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed.
{{</ notice >}}

View File

@ -11,7 +11,7 @@ icon: "/images/docs/docs.svg"
---
在 KubeSphere 中,您可以使用交互式 Web 控制台或内置的本地命令行工具`kubectl`设置集群并配置其功能。作为集群管理员,您将负责一系列任务,包括在节点上管理调度并添加标签,控制集群可见性,​​监控集群状态,设置集群的告警范围和通知规则,以及配置存储和日志收集的方案等。
在 KubeSphere 中,您可以使用交互式 Web 控制台或内置的原生命令行工具 `kubectl`设置集群并配置其功能。作为集群管理员,您将负责一系列任务,包括在节点上管理调度并添加标签、控制集群可见性、​​监控集群状态、设置集群的告警规则和通知规则,以及配置存储和日志收集解决方案等。
{{< notice note >}}
@ -19,72 +19,72 @@ icon: "/images/docs/docs.svg"
{{</ notice >}}
## [持久卷 (PV) 和存储类 (Storage Class)](../cluster-administration/persistent-volume-and-storage-class/)
了解持久卷 (PV),持久卷实例 (PVC) 和存储类 (Storge Class) 的基本概念,并演示如何在 KubeSphere 中管理存储类和持久卷实例。
## [节点管理](../cluster-administration/nodes/)
监控节点状态并了解如何添加节点标签和污点。
## [集群状态监控](../cluster-administration/cluster-status-monitoring/)
根据不同的指标(包括物理资源、ETCD 和 APIServer监控集群如何运行。
根据不同的指标(包括物理资源、etcd 和 APIServer监控集群如何运行。
## [应用资源监控](../cluster-administration/application-resources-monitoring/)
监控集群中的应用资源,比如不同项目的 Deployment 数量和 CPU 使用情况。
监控集群中的应用资源,例如不同项目的部署数量和 CPU 使用情况。
## 群集范围的告警和通知
## [持久卷和存储类型](../cluster-administration/persistent-volume-and-storage-class/)
### [kubeSphere 中的 Alertmanager](../cluster-administration/cluster-wide-alerting-and-notification/alertmanager/)
了解 PV、PVC 和存储类型的基本概念,并演示如何在 KubeSphere 中管理存储类型和 PVC。
## 集群告警和通知
### [KubeSphere 中的 Alertmanager](../cluster-administration/cluster-wide-alerting-and-notification/alertmanager/)
了解如何在 KubeSphere 中使用 Alertmanager 管理告警。
### [通知管理器](../cluster-administration/cluster-wide-alerting-and-notification/notification-manager/)
### [Notification Manager](../cluster-administration/cluster-wide-alerting-and-notification/notification-manager/)
了解如何使用通知管理器管理通知。
了解如何使用 Notification Manager 管理通知。
### [告警策略 (节点级别)](../cluster-administration/cluster-wide-alerting-and-notification/alerting-policy/)
### [告警策略(节点级别)](../cluster-administration/cluster-wide-alerting-and-notification/alerting-policy/)
了解如何为节点设置告警策略。
### [告警信息 (节点级别)](../cluster-administration/cluster-wide-alerting-and-notification/alerting-message/)
### [告警消息(节点级别)](../cluster-administration/cluster-wide-alerting-and-notification/alerting-message/)
了解如何查看节点的告警策略。
了解如何查看节点的告警策略并处理告警消息
## 集群设置
### [集群可见性与权限](../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)
### [集群可见性和授权](../cluster-administration/cluster-settings/cluster-visibility-and-authorization/)
了解如何设置集群的可见性以及授权。
了解如何设置集群可见性和授权。
### 日志收集
#### [介绍](../cluster-administration/cluster-settings/log-collections/introduction/)
了解集群日志收集的基础知识,包括工具和常规步骤。
了解集群日志收集的基础知识,包括工具和一般步骤。
#### [将Elasticsearch作为接收者](../cluster-administration/cluster-settings/log-collections/add-es-as-receiver/)
#### [添加 Elasticsearch 作为接收器](../cluster-administration/cluster-settings/log-collections/add-es-as-receiver/)
了解如何添加 Elasticsearch 来接收日志、事件或审计日志。
#### [将Kafka添加为接收者](../cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/)
#### [添加 Kafka 作为接收器](../cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver/)
了解如何添加 Kafka 来接收日志、事件或审计日志。
#### [将Fluentd添加为接收者](../cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/)
#### [添加 Fluentd 作为接收器](../cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver/)
了解如何添加 Fluentd 来接收日志、事件或审计日志。
### [邮件服务器](../cluster-administration/cluster-settings/mail-server/)
自定义您的电子邮件地址设置,以接收任何告警的通知。
自定义您的电子邮件地址设置,以接收来自告警的通知。
## [自定义平台信息](../cluster-administration/platform-settings/)
## [平台设置](../cluster-administration/platform-settings/)
自定义平台设置,如 Logo、标题等。
自定义平台设置,如 Logo、标题等。
## [关闭和重启集群](../cluster-administration/shut-down-and-restart-cluster-gracefully/)
了解如何优雅地关闭集群以及如何重新启动它
了解如何平稳地关闭和重启集群

View File

@ -1,6 +1,6 @@
---
title: "邮件服务器"
keywords: 'KubeSphere, Kubernetes, Notification, Mail Server'
keywords: 'KubeSphere, Kubernetes, 通知, 邮件服务器'
description: '邮件服务器'
linkTitle: "邮件服务器"
@ -9,26 +9,25 @@ weight: 8630
## 目标
本指南演示告警策略的电子邮件通知设置(支持自定义设置)。 您可以指定用户电子邮件地址以接收告警消息。
本指南演示告警策略的电子邮件通知设置(支持自定义设置)。您可以指定用户电子邮件地址以接收告警消息。
## 前提条件
## 准备工作
[KubeSphere Alerting and Notification](../../../pluggable-components/alerting-notification/) 需要启用
需要启用 [KubeSphere 告警和通知系统](../../../pluggable-components/alerting-notification/)。
## 动手实验
## 动手实验
1. 使用具有 ` platform-admin` 角色的一个帐户登录 Web 控制台。
2. 点击左上角的平台管理,然后选择集群管理。
1. 使用具有 `platform-admin` 角色的帐户登录 Web 控制台。
2. 点击左上角的**平台管理**,然后选择**集群管理**
![mail_server_guide](/images/docs/alerting/mail_server_guide-zh.png)
![mail_server_guide](/images/docs/zh-cn/cluster-administration/cluster-settings/mail-server/mail_server_guide.png)
1. 从列表中选择一个集群并输入它(如果您未启用[多集群功能](../../../multicluster-management/),则将直接转到**概述**页面)。
2. 在**群集设置**下选择**邮件服务器**。 在页面中,提供您的邮件服务器配置和 SMTP 身份验证信息,如下所示:
- **SMTP 服务器地址**:填写可以提供邮件服务的 SMTP 服务器地址。 端口通常是 25。
- **使用 SSL 安全连接**SSL 可用于加密邮件,从而提高了邮件传输信息的安全性。 通常,您必须为邮件服务器配置证书。
- SMTP 验证信息:如下填写 **SMTP 用户****SMTP 密码****发件人电子邮件地址**等
3. 从列表中选择一个集群并进入该集群(如果您未启用[多集群功能](../../../multicluster-management/),则会直接转到**概览**页面)。
4. 在**集群设置**下选择**邮件服务器**。请在该页面提供您的邮件服务器配置和 SMTP 身份验证信息,如下所示:
- **SMTP 服务器地址**:填写可以提供邮件服务的 SMTP 服务器地址。端口通常是 25。
- **使用 SSL 安全连接**SSL 可用于加密邮件,从而提高邮件传输信息的安全性。通常,您需要为邮件服务器配置证书。
- SMTP 身份验证信息:请填写 **SMTP 用户**、**SMTP 密码**、**发件人电子邮件地址**等,如下所示。
![mail_server_config](/images/docs/alerting/mail_server_config-zh.png)
5. 完成上述设置后,单击**保存**。 您可以发送测试电子邮件以验证服务器配置是否成功。
![mail_server_config](/images/docs/zh-cn/cluster-administration/cluster-settings/mail-server/mail-server-config.PNG)
5. 完成上述设置后,请点击**保存**。您可以发送一封测试电子邮件以验证服务器配置是否成功。

View File

@ -1,53 +1,53 @@
---
title: "告警消息(节点级别)"
keywords: 'KubeSphere, Kubernetes, Node, Alerting, Message, Notification'
description: '如何在节点级别查看告警息。'
keywords: 'KubeSphere, Kubernetes, 节点, 告警, 消息, 通知'
description: '如何在节点级别查看告警息。'
linkTitle: "告警消息(节点级别)"
weight: 8540
---
告警信息记录根据警报规则触发的告警的详细信息,包括监控目标,告警策略,最近的通知和注释
## 前提条件
告警消息会记录按照告警规则触发的告警的详细信息,包括监控目标、告警策略、最近通知和处理意见
## 准备工作
您已创建节点级告警策略并收到有关它的告警通知。 如果尚未准备就绪,请先先参考[告警策略(节点级别)](../alerting-policy/) 创建一个。
您已创建节点级别的告警策略并收到该策略的告警通知。如果尚未准备就绪,请先参考[告警策略(节点级别)](../alerting-policy/)创建一个告警策略
## 动手实验
### 任务 1: 查看告警信息
### 任务 1查看告警信息
1. 使用一个被授予`平台管理员`角色的帐户登录控制台。.
1. 使用具有 `platform-admin` 角色的帐户登录控制台。
2. 击左上角的**平台管理**,然后选择**集群管理**。
2. 击左上角的**平台管理**,然后选择**集群管理**。
![alerting_message_node_level_guide](/images/docs/alerting-zh/alerting_message_node_level_guide.png)
![选择集群管理](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_guide.png)
3. 从列表中选择一个集群并进入(如果您未启用[多集群特性](../../../multicluster-management/),则将直接转到**总览**页面)。
3. 从列表中选择一个集群并进入该集群(如果您未启用[多集群功能](../../../multicluster-management/),则将直接转到**概览**页面)。
4. 导航到**监控告警**下的**告警消息** 您可以在列表中看到告警消息。您可以在列表中看到警告消息。在[告警策略(节点级别)](../alerting-policy/)的示例中,您将一个节点设置为监控目标,它的内存利用率高于`50%`的阈值,因此您可以看到它的警报消息。
4. 转到**监控告警**下的**告警消息**,您可以在列表中看到告警消息。在[告警策略(节点级别)](../alerting-policy/)的示例中,您将一个节点设置为监控目标并且它的内存利用率高于 `50%` 的阈值,因此您可以看到它的告警消息。
![alerting_message_node_level_list](/images/docs/alerting-zh/alerting_message_node_level_list.png)
![告警消息列表](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_list.png)
5. 单击告警消息以进入详细信息页面。 在**告警详细信息**中,您可以看到节点的内存利用率随时间变化的图表,该图表显示内存一直高于警规则中设置的`50`阈值,因此触发了警
5. 点击该告警消息进入详情页面。在**告警详情**中,您可以看到节点的内存利用率随时间变化的图表,该图表显示内存利用率一直高于告警规则中设置的 `50%` 阈值,因此触发了告警。
![alerting_message_node_level_detail](/images/docs/alerting-zh/alerting_message_node_level_detail.png)
![告警详情](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_detail.png)
### 任务 2: 查看告警策略
### 任务 2查看告警策略
切换到**告警策略**查看与此告警消息相对应的告警策略,您可以在[告警策略(节点级别)](../alerting-policy/)示例中看到设置的触发规则。
![alerting_message_node_level_policy](/images/docs/alerting-zh/alerting_message_node_level_policy.png)
![告警策略](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_policy.png)
### 任务 3: 查看最近的通知
### 任务 3:查看最近通知
1. 切换到**最近通知**。 可以看到已收到3条通知因为该通知规则设置的自定义重复规则为`每5分钟告警1次`和`最多重发3次`。
1. 切换到**最近通知**。您可以看到已收到 3 条通知,因为该通知规则设置的重复规则为`每 5 分钟告警一次`和`最多重发 3 次`。
![alerting_message_node_level_notification](/images/docs/alerting-zh/alerting_message_node_level_notification.png)
![告警通知](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_notification.png)
2. 登录到配置的告警发送邮箱查看KubeSphere邮件服务器发送的警通知邮件。您一共会收到3封邮件。
2. 登录到配置的邮箱,查看 KubeSphere 邮件服务器发送的警通知邮件。您一共会收到 3 封邮件。
### 任务 4: 添加处理意见
### 任务 4添加处理意见
单击**处理意见**将意见添加到当前警报消息。 例如,由于节点的内存利用率高于基于警报规则设置的阈值,因此您可以在下面的对话框中添加注释:`节点需要添加污点标记,并且不允许将新的容器组调度到该节点`。
点击**处理意见**将处理意见添加到当前告警消息。例如,由于节点的内存利用率高于告警规则中设置的阈值,因此您可以在下面的对话框中添加处理意见:`节点需要添加污点,并且不允许将新的 Pod 调度到该节点`。
![alerting_message_node_level_comment](/images/docs/alerting-zh/alerting_message_node_level_comment.png)
![处理意见](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-message-node-level/alerting_message_node_level_comment.PNG)

View File

@ -1,7 +1,7 @@
---
title: "告警策略(节点级别)"
keywords: 'KubeSphere, Kubernetes, Node, Alerting, Policy, Notification'
description: '如何在节点级设置告警策略。'
keywords: 'KubeSphere, Kubernetes, 节点, 告警, 策略, 通知'
description: '如何在节点级设置告警策略。'
linkTitle: "告警策略(节点级别)"
weight: 8530
@ -9,95 +9,91 @@ weight: 8530
## 目标
KubeSphere为节点和工作负载提供告警策略。 本指南演示如何为集中的节点创建告警策略以及如何配置邮件通知。如需了解如何为工作负载配置告警策略请参阅[告警策略(工作负载级别)](../../../project-user-guide/alerting/alerting-policy/)
KubeSphere 为节点和工作负载提供告警策略。本指南演示如何为集群中的节点创建告警策略以及如何配置电子邮件通知。如需了解如何为工作负载配置告警策略,请参见[告警策略(工作负载级别)](../../../project-user-guide/alerting/alerting-policy/)。
## 前提条件
## 准备工作
- [KubeSphere告警和通知](../../../pluggable-components/alerting-notification/)功能需要启用
- [邮件服务器](../../../cluster-administration/cluster-settings/mail-server/) 需要配置
- 您需要启用 [KubeSphere 告警和通知系统](../../../pluggable-components/alerting-notification/)。
- 您需要配置[邮件服务器](../../../cluster-administration/cluster-settings/mail-server/)。
## 动手实验
### 任务 1: 创建一个告警策略
### 任务 1创建一个告警策略
1. 使用一个被授予`平台管理员`角色的帐户登录控制台。
1. 使用具有 `platform-admin` 角色的帐户登录控制台。
2. 击左上角的**平台管理**,然后选择**集群管理**。
2. 击左上角的**平台管理**,然后选择**集群管理**。
![alerting_policy_node_level_guide](/images/docs/alerting-zh/alerting_policy_node_level_guide.png)
![选择集群管理](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_guide.png)
3. 从列表中选择一个集群并进入(如果您未启用[多集群特性](../../../multicluster-management/),则将直接转到**总览**页面)。
3. 从列表中选择一个集群并进入该集群(如果您未启用[多集群功能](../../../multicluster-management/),则将直接转到**概览**页面)。
4. 导航到**监控告警**下的**告警策略**,点击 **创建**.
4. 转到**监控告警**下的**告警策略**,点击**创建**。
![alerting_policy_node_level_create](/images/docs/alerting-zh/alerting_policy_node_level_create.png)
![点击创建](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_create.png)
### 任务 2: 提供基本信息
### 任务 2提供基本信息
出现的对话框中,填写如下基本信息。 完成后,单击**下一步**。
弹出对话框中,填写如下基本信息。完成操作后,点击**下一步**。
- **名称**: 简洁明了的名称作为其唯一标识符,例如`alert-demo`
- **别名**: 帮助您更好地区分告警策略。 支持中文。
- **描述信息**: 告警策略的简要介绍。
- **名称**:该告警策略的简明名称,例如 `alert-demo`,用作其唯一标识符
- **别名**:帮助您更好地区分告警策略,支持中文。
- **描述信息**告警策略的简要介绍。
![alerting_policy_node_level_basic_info](/images/docs/alerting-zh/alerting_policy_node_level_basic_info.png)
![基本信息](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_basic_info.png)
### 任务 3: 选择监控目标
### 任务 3选择监控目标
在节点列表中选择节点,或使用**节点选择器**选择一组节点作为监控目标。 为了方便演示,此处选择一个节点。 完成后单击“下一步”
在节点列表中选择节点,或使用**节点选择器**选择一组节点作为监控目标。为了方便演示,此处选择一个节点。完成操作后,点击**下一步**
![alerting_policy_node_level_monitoring_target](/images/docs/alerting-zh/alerting_policy_node_level_monitoring_target.png)
![监控目标](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_monitoring_target.png)
{{< notice note >}}
您可以通过以下三种方式从下拉菜单中对节点列表中的节点进行排序:</br>
1. CPU使用率
2. 内存使用率
3. 容器组用量
您可以在下拉菜单中通过以下三种方式对节点列表中的节点进行排序:`按 CPU 使用率排行`、`按内存使用率排行`、`按容器组用量排行`。
{{</ notice >}}
### 任务 4: 添加告警规则
### 任务 4添加告警规则
1. 单击**添加规则**开始创建告警规则。该规则提供丰富的配置,如度量标准类型、检查周期、连续次数、度量阈值和告警级别之类的参数。 检测周期(**规则**下的第二个字段)表示对度量进行两次连续检查之间的时间间隔。 例如,`1分钟/周期`表示每1分钟检查一次指标。 连续次数(**规则**下的第三个字段)表示检查的指标满足阈值的连续次数。 只有当实际次数等于或大于告警策略中设置的连续次数时,才会触发告警。
1. 点击**添加规则**创建告警规则。告警规则定义指标类型、检查周期、连续次数、指标阈值和告警级别等多个参数,可提供丰富配置。检查周期(**规则**下的第二个字段)表示两次连续指标检查之间的时间间隔。例如,`2 分钟/周期`表示每 2 分钟检查一次指标。连续次数(**规则**下的第三个字段)表示检查的指标满足阈值的连续次数。只有当实际次数等于或大于告警策略中设置的连续次数时,才会触发告警。
![alerting_policy_node_level_alerting_rule](/images/docs/alerting-zh/alerting_policy_node_level_alerting_rule.png)
![告警规则](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_alerting_rule.png)
2. 在本示例中,将这些参数分别设置为`内存利用率``1分钟/周期``连续2次``>50`和`重要告警`。这意味着KubeSphere会每分钟检查一次内存利用率如果连续2次大于50%,则会触发此重要告警。
2. 在本示例中,将这些参数分别设置为`内存利用率`、`1 分钟/周期`、`连续2次`、`>`、`50` 和`重要告警`。这意味着 KubeSphere 会每 1 分钟检查一次内存利用率,如果连续 2 次大于 50%,则会触发此重要告警。
3. 完成后,单击 **√** 保存规则,然后单击**下一步**继续。
3. 完成操作后,点击 **√** 保存规则,然后点击**下一步**继续。
{{< notice note >}}
您可以为以下指标创建节点级别的告警策略:
- CPU`CPU利用率`, `CPU 1分钟平均负载`, `CPU 5分钟平均负载`, `CPU 15分钟平均负载`
- 内存: `内存利用率`, `可用内存`
- 磁盘: `inode利用率`, `本地磁盘可用空间`, `本地磁盘空间利用率`, `本地磁盘写入吞吐`, `本地磁盘读吞吐`, `磁盘读iops`, `磁盘写iops`
- 网络: `网络发送数据速率`, `网络接收数据速率`
- 容器组: `容器组异常率`, `容器组利用率`
- CPU`CPU利用率`、`CPU 1分钟平均负载`、`CPU 5分钟平均负载`、`CPU 15分钟平均负载`
- 内存:`内存利用率`、`可用内存`
- 磁盘:`inode利用率`、`本地磁盘可用空间`、`本地磁盘空间利用率`、`本地磁盘写入吞吐量`、`本地磁盘读取吞吐量`、`本地磁盘读取IOPS`、`本地磁盘写入IOPS`
- 网络:`网络发送数据速率`、`网络接收数据速率`
- 容器组:`容器组异常率`、`容器组利用率`
{{</ notice >}}
### 任务 5: 设置通知规则
### 任务 5设置通知规则
1. **有效通知时间范围**用于设置通知电子邮件的发送时间,例如09:00〜19:00。 **通知渠道**目前仅支持电子邮件。 您可以将要通知的成员电子邮件地址添加到**通知列表**
1. **通知有效时间**用于设置通知电子邮件的发送时间,例如 `09:00` 至 `19:00`。 **通知渠道**目前仅支持**邮箱**。您可以在**通知列表**中添加要通知的成员的邮箱地址
2. **自定义重复规则**定义了通知电子邮件的发送频率和重发次数。 如果尚未解决告警,则将在一段时间后重复发送通知。 还可以为不同级别的告警设置不同的重复规则。 由于在上一步中设置的警报级别为**重要告警**,因此在**重要告警**的第二个字段选择`每5分钟警一次`(发送周期),并在第三个字段中选`最多重发3次`(重发次数)。 请参考下图设置通知规则:
2. **自定义重复规则**用于定义通知邮件的发送周期和重发次数。如果告警未被解除,则会在一段时间后重复发送通知。不同级别的告警还可以设置不同的重复规则。上一步中设置的告警级别为`重要告警`,因此在**重要告警**的第二个字段选择`每 5 分钟告警一次`(发送周期),并在第三个字段中选择`最多重发 3 次`(重发次数)。请参考下图设置通知规则:
![alerting_policy_node_level_notification_rule](/images/docs/alerting-zh/alerting_policy_node_level_notification_rule.png)
![通知规则](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting_policy_node_level_notification_rule.PNG)
3. 击**创建**,您可以看到告警策略已成功创建。
3. 击**创建**,您可以看到告警策略已成功创建。
{{< notice note >}}
*警报等待时间* **=** *检测周期* **x** *连续次数*。 例如如果检测周期为1分钟/周期并且连续次数为2则需要等待2分钟后才会显示告警消息。
*告警等待时间* **=** *检查周期* **x** *连续次数*。例如,如果检查周期为 1 分钟/周期,并且连续次数为 2则需要等待 2 分钟后才会显示告警消息。
{{</ notice >}}
### 任务 6: 查看告警策略
### 任务 6查看告警策略
成功创建告警策略后,您可以进入其详细信息页面查看状态:告警规则、监控目标、通知规则和告警历史记录等。单击**更多操作**,然后从下拉菜单中选择**更改状态**可以启用或禁用此告警策略。
成功创建告警策略后,您可以进入其详情页面查看状态、告警规则、监控目标、通知规则和告警历史等信息。点击**更多操作**,然后从下拉菜单中选择**更改状态**可以启用或禁用此告警策略。
![alerting-policy-node-level-detail-page](/images/docs/alerting-zh/alerting-policy-node-level-detail-page.png)
![详情页面](/images/docs/zh-cn/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy-node-level/alerting-policy-node-level-detail-page.png)

View File

@ -1,6 +1,6 @@
---
title: "关闭和重启集群"
description: "Demonstrate how to shut down and restart Kubernetes clusters gracefully"
description: "演示如何平稳地关闭和重启 Kubernetes 集群"
layout: "single"
linkTitle: "关闭和重启集群"
@ -9,33 +9,33 @@ weight: 8800
icon: "/images/docs/docs.svg"
---
出于维护原因,您可能需要临时关闭群集。本文档介绍了正常关闭集群的过程以及如何重新启动集群。
您可能需要临时关闭集群进行维护。本文介绍平稳关闭集群的流程以及如何重新启动集群。
{{< notice warning >}}
关闭群集非常危险。 您必须完全了解所做的操作及其后果。 请先进行etcd备份然后再继续。 通常建议一 一维护您的节点,而不是重新启动整个集群。
关闭集群是非常危险的操作。您必须完全了解该操作及其后果。请先进行 etcd 备份,然后再继续。通常情况下,建议您逐个维护节点,而不是重新启动整个集群。
{{</ notice >}}
## 前提条件
## 准备工作
- 关闭群集之前,请先进行[etcd备份](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md#snapshotting-the-keyspace)。
- 主机之间设置了SSH[免密登录](https://man.openbsd.org/ssh.1#AUTHENTICATION)。
- 请先进行 [etcd 备份](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md#snapshotting-the-keyspace),再关闭集群
- 主机之间已设置 SSH [免密登录](https://man.openbsd.org/ssh.1#AUTHENTICATION)。
## 关闭集群
{{< notice tip >}}
- 关闭群集之前必须备份etcd数据因为如果在重新启动群集时遇到任何问题则可以通过etcd还原群集
- 使用本教程中的方法可以正常关闭集群,而数据损坏的可能性仍然存在。
- 关闭集群前,请您务必备份 etcd 数据,以便在重新启动集群时如果遇到任何问题,可以通过 etcd 还原集群。
- 使用本教程中的方法可以平稳关闭集群,但数据损坏的可能性仍然存在。
{{</ notice >}}
### 步骤 1: 获取节点列表
### 步骤 1获取节点列表
```bash
nodes=$(kubectl get nodes -o name)
```
### 步骤 2: 关闭所有节点
### 步骤 2关闭所有节点
```bash
for node in ${nodes[@]}
@ -45,45 +45,45 @@ do
done
```
然后,您可以关闭其他群依赖项,例如外部存储。
然后,您可以关闭其他的集群依赖项,例如外部存储。
## 正常重启群集
## 平稳重启集群
在正常关闭集群后,可以正常重启集群。
平稳关闭集群后,您可以平稳重启集群。
### 前提条件
### 准备工作
您已正常关闭集群。
您已平稳关闭集群。
{{< notice tip >}}
大多数情况下,重新启动集群后可以继续正常使用,但是由于意外情况,该集群可能不可用。 例如:
通常情况下,重新启动集群后可以继续正常使用,但是由于意外情况,该集群可能不可用。例如:
- 关闭期间Etcd数据损坏。
- 关闭集群过程中 etcd 数据损坏。
- 节点故障。
- 不可预期的网络错误。
{{</ notice >}}
### 步骤 1: 检查所有群集依赖项的状态
### 步骤 1:检查所有集群依赖项的状态
确保所有集依赖项均已就绪,例如外部存储。
确保所有集依赖项均已就绪,例如外部存储。
### 步骤 2: 打开集群主机电源
### 步骤 2打开集群主机电源
等待集群启动并运行这可能需要大约10分钟。
等待集群启动并运行,这可能需要大约 10 分钟。
### 步骤 3: 检查所有主节点的状态
### 步骤 3检查所有主节点的状态
检查核心组件例如etcd服务的状态并确保一切就绪。
检查核心组件(例如 etcd 服务)的状态,并确保一切就绪。
```bash
kubectl get nodes -l node-role.kubernetes.io/master
```
### 步骤 4: 检查所有工作节点的状态
### 步骤 4检查所有工作节点的状态
```bash
kubectl get nodes -l node-role.kubernetes.io/worker
```
如果您的集群重启失败,请尝试[恢复etcd集群](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md#restoring-a-cluster)。
如果您的集群重启失败,请尝试[恢复 etcd 集群](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md#restoring-a-cluster)。

View File

@ -1,21 +1,19 @@
---
title: "DevOps 用户指南"
description: "如何使用 KubeSphere DevOps"
description: "开始使用 KubeSphere DevOps 工程"
layout: "single"
linkTitle: "DevOps 用户指南"
weight: 11000
icon: "/images/docs/docs.svg"
---
您可以使用 KubeSphere DevOps 系统在 Kubernetes 集群上部署和管理 CI/CD 任务以及相关的工作负载。本章演示如何在 DevOps 工程中进行管理和操作,包括运行流水线、创建凭证和集成工具等等。
可以使用 KubeSphere DevOps 系统在 Kubernetes 集群上部署和管理 CI/CD 任务以及相关的工作负载。本章演示了如何在 DevOps 工程中进行管理和工作,包括运行流水线、创建凭据和集成工具
安装 DevOps 组件时,会自动部署 Jenkins。您可以在 KubeSphere 中像以前一样通过 Jenkinsfile 构建流水线保持一致的用户体验。此外KubeSphere 还提供图形编辑面板,可以将整个流程可视化,为您直观地呈现流水线在每个阶段的运行状态
在安装 DevOps 组件时,将自动部署 Jenkins。KubeSphere 为您提供一致的用户体验,因为您可以像以前一样通过 Jenkinsfile 建立流水线。此外KubeSphere 还具有高度实时化的图形编辑面板,可以可视化整个流程,为您提供一个直观的视图,方便您了解流水线在每个阶段的运行状态。
## 理解和管理 DevOps 工程
## 了解和管理 DevOps 工程
### [概述](../devops-user-guide/understand-and-manage-devops-projects/overview/)
@ -55,21 +53,21 @@ icon: "/images/docs/docs.svg"
设置电子邮件服务器以接收有关您 Jenkins 流水线的通知。
### [为依赖缓存设置 CI 节点](../devops-user-guide/how-to-use/set-ci-node/)
### [为依赖缓存设置 CI 节点](../devops-user-guide/how-to-use/set-ci-node/)
配置专门用于持续集成 (CI) 的一个或一组节点从而加快流水线中的构建过程。
配置专门用于持续集成 (CI) 的一个或一组节点加快流水线中的构建过程。
### [流水线设置](../devops-user-guide/how-to-use/pipeline-settings/)
解 DevOps 工程中流水线的各个属性。
解 DevOps 工程中流水线的各个属性。
## 工具集成
### [将 SonarQube 集成到流水线](../devops-user-guide/how-to-integrate/sonarqube/)
### [将 SonarQube 集成到流水线](../devops-user-guide/how-to-integrate/sonarqube/)
将 SonarQube 集成到流水线中进行代码质量分析。
### [将 Harbor 集成到流水线](../devops-user-guide/how-to-integrate/harbor/)
### [将 Harbor 集成到流水线](../devops-user-guide/how-to-integrate/harbor/)
将 Harbor 集成到流水线中并向您的 Harbor 仓库推送镜像。
@ -79,10 +77,10 @@ icon: "/images/docs/docs.svg"
学习如何使用 KubeSphere 流水线构建并部署 Go 工程。
### [构建和部署 Maven 工程](../devops-user-guide/examples/a-maven-project/)
学习如何使用 KubeSphere 流水线构建并部署 Maven 工程。
### [使用 Jenkinsfile 在多集群项目中部署应用](../devops-user-guide/examples/multi-cluster-project-example/)
学习如何使用 KubeSphere 流水线基于 Jenkinsfile 在多集群项目中部署应用。
学习如何使用基于 Jenkinsfile 的流水线在多集群项目中部署应用。
### [构建和部署 Maven 工程](../devops-user-guide/examples/a-maven-project/)
学习如何使用 KubeSphere 流水线构建并部署 Maven 工程。

View File

@ -1,30 +1,30 @@
---
title: "How to Build and Deploy a Maven Project"
keywords: 'kubernetes, docker, devops, jenkins, maven'
description: ''
linkTitle: "Build And Deploy A Maven Project"
title: "构建和部署 Maven 工程"
keywords: 'Kubernetes, Docker, DevOps, Jenkins, Maven'
description: '如何构建和部署 Maven 工程'
linkTitle: "构建和部署 Maven 工程"
weight: 11430
---
## Prerequisites
## 准备工作
- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
- You need to create [DockerHub](http://www.dockerhub.com/) account.
- You need to create a workspace, a DevOps project, and a user account, and this account needs to be invited into the DevOps project as the role of `maintainer`.
- 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要有一个 [Docker Hub](http://www.dockerhub.com/) 帐户。
- 您需要创建一个企业空间、一个 DevOps 工程和一个用户帐户,并需要邀请该帐户至 DevOps 工程中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
## Workflow for Maven Project
## Maven 工程的工作流
As is shown in the graph, there is the workflow for a Maven project in KubeSphere DevOps, which uses the pipeline of Jenkins to build and deploy the Maven project. All steps are defined in the pipeline.
KubeSphere DevOps 中有针对 Maven 工程的工作流,如下图所示,它使用 Jenkins 流水线来构建和部署 Maven 工程。所有步骤均在流水线中进行定义。
When running, Jenkins Master creates a Pod to run the pipeline. Kubernetes creates the Pod as the agent of Jenkins Master, and the Pod will be destoryed after pipeline finished. The main process is to clone code, build & push image, and deploy the workload.
![maven-project-jenkins](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/maven-project-jenkins.png)
![workflow](/images/devops/maven-project-jenkins.png)
首先Jenkins Master 创建一个 Pod 来运行流水线。Kubernetes 创建 Pod 作为 Jenkins Master 的 Agent该 Pod 会在流水线完成之后销毁。主要流程包括克隆代码、构建和推送镜像以及部署工作负载。
## Default Configurations in Jenkins
## Jenkins 中的默认配置
### Maven Version
### Maven 版本
Execute the following command in the Maven builder container to get version info.
在 Maven 构建器 (Builder) 容器中执行以下命令获取版本信息。
```bash
mvn --version
@ -36,136 +36,134 @@ Java home: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.i386/jre
Default locale: en_US, platform encoding: UTF-8
```
### Maven Cache
### Maven 缓存
Jenkins Agent mounts the directories by Docker Volume on the node. So the pipeline can cache some spicial directory such as `/root/.m2`, which is used for the Maven building and the default cache directory for Maven tools in KubeSphere DevOps so that the dependency packages are downloaded and cached on the node.
Jenkins Agent 通过节点上的 Docker 存储卷 (Volume) 挂载目录。流水线可以缓存一些特殊目录,例如 `/root/.m2`,这些特殊目录用于 Maven 构建并在 KubeSphere DevOps 中用作 Maven 工具的默认缓存目录,以便依赖项包下载和缓存到节点上。
### Global Maven Settings in Jenkins Agent
### Jenkins Agent 中的全局 Maven 设置
The default Maven settings file path is `maven` and the configuration file path is `/opt/apache-maven-3.5.3/conf/settings.xml`. Execute the following command to get the content of Maven settings.
Maven 设置的默认文件路径是 `maven`,配置文件路径是 `/opt/apache-maven-3.5.3/conf/settings.xml`。执行以下命令获取 Maven 的设置内容。
```bash
kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml
```
### Network of Maven Pod
### Maven Pod 的网络
The Pod labeled `maven` uses the docker-in-docker network to run the pipeline. That is, the `/var/run/docker.sock` in the node is mounted into the Maven container.
具有 `maven` 标签的 Pod 使用 docker-in-docker 网络来运行流水线,即节点中的 `/var/run/docker.sock` 被挂载至该 Maven 容器。
## A Maven Pipeline Example
## Maven 流水线示例
### Prepare for the Maven Project
### Maven 工程准备工作
- ensure build the Maven project successfully on the development device.
- add the Dockerfile file into the project repo for building the image, refer to <https://github.com/kubesphere/devops-java-sample/blob/master/Dockerfile-online>.
- add the yaml file into the project repo for deploy the workload, refer to <https://github.com/kubesphere/devops-java-sample/tree/master/deploy/dev-ol>. If there are different environments, you need to prepare multiple deployment files.
- 确保您在开发设备上成功构建 Maven 工程。
- 添加 Dockerfile 至工程仓库以构建镜像。有关更多信息,请参考 <https://github.com/kubesphere/devops-java-sample/blob/master/Dockerfile-online>
- 添加 YAML 文件至工程仓库以部署工作负载。有关更多信息,请参考 <https://github.com/kubesphere/devops-java-sample/tree/master/deploy/dev-ol>。如果有多个不同环境,您需要准备多个部署文件。
### Create the Credentials
### 创建凭证
- dockerhub-id. A *Account Credentials* for registry, e.g DockerHub.
- demo-kuebconfig. A *Kubeconfig Credential* for deploying workloads.
| 凭证 ID | 类型 | 用途 |
| --------------- | ---------- | --------------------- |
| dockerhub-id | 帐户凭证 | 仓库,例如 Docker Hub |
| demo-kubeconfig | kubeconfig | 部署工作负载 |
For details, please refer to the [Credentials Management](../../how-to-use/credential-management/).
有关详细信息,请参考[凭证管理](../../how-to-use/credential-management/)。
![view credential list](/images/devops/view-credential-list.png)
![查看凭证列表](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-credential-lists.PNG)
### Create the Project for Workloads
### 为工作负载创建一个项目
In this demo, all workloads are deployed under `kubesphere-sample-dev`, so you need to create the project `kubesphere-sample-dev` in advance.
在本示例中,所有工作负载都部署在 `kubesphere-sample-dev` 项目中。您必须事先创建 `kubesphere-sample-dev` 项目。
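如果您更习惯使用命令行,也可以直接创建同名的命名空间(仅供参考;通过 KubeSphere 控制台创建项目会同时完成更多初始化配置):

```bash
kubectl create namespace kubesphere-sample-dev
```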
![view namespace](/images/devops/view-namespace.png)
![查看项目](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-namespace.PNG)
### Create the Pipeline for the Maven Project
### 为 Maven 工程创建一个流水线
First, create a *DevOps Project* and a *Pipeline*. Refer to [Create a Pipeline - Using the Graphical Editing Panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel).
1. 在您的 DevOps 工程中,转到**流水线**页面并点击**创建**。有关更多信息,请参见[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel)。
Then, click the *Edit Jenkinsfile* button under your pipeline.
2. 转到该流水线的详情页面,点击**编辑 Jenkinsfile**。
![edit jenkinsfile](/images/devops/edit-jenkinsfile.png)
![编辑 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/edit-jenkinsfile.PNG)
Paste the following text into the pop-up window and save it.
3. 复制粘贴以下内容至弹出窗口。您必须将 `DOCKERHUB_NAMESPACE` 的值替换为您自己的值,完成操作后进行保存。
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
parameters {
string(name:'TAG_NAME',defaultValue: '',description:'')
}
environment {
DOCKER_CREDENTIAL_ID = 'dockerhub-id'
KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
REGISTRY = 'docker.io'
// need to replace by yourself dockerhub namespace
DOCKERHUB_NAMESPACE = 'shaowenchen'
APP_NAME = 'devops-java-sample'
BRANCH_NAME = 'dev'
}
stages {
stage ('checkout scm') {
steps {
git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git"
}
}
stage ('unit test') {
steps {
container ('maven') {
sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test'
}
}
}
stage ('build & push') {
steps {
container ('maven') {
sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package'
sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .'
withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) {
sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin'
sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER'
}
}
}
}
stage('deploy to dev') {
steps {
kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
}
}
}
}
```
4. 保存该 Jenkinsfile您可以看到图形编辑面板上已自动创建阶段和步骤。
![查看 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-edit-jenkinsfile.PNG)
### 运行和测试
1. 点击运行并在 `TAG_NAME` 中输入内容,运行流水线。
![运行 Maven 流水线](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/run-maven-pipeline.PNG)
2. 待流水线运行完成,您可以看到下图所示内容。
After saving the Jenkinsfile, stages and steps are created automatically on the graphical editing panel.
![查看流水线运行结果](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-pipeline.PNG)
![view jenkinsfile](/images/devops/view-edit-jenkinsfile.png)
3. 在 `kubesphere-sample-dev` 项目中,已创建新的工作负载。
### Run and test
![查看工作负载](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload.PNG)
Click **Run** and enter a value for `TAG_NAME` to run the pipeline.
4. 您可以查看服务 (Service) 的访问地址,如下所示。
![run maven pipeline](/images/devops/run-maven-pipeline.png)
After the run is complete, you can see the following figure.
![view result](/images/devops/view-result-maven-pipeline.png)
Under the project `kubesphere-sample-dev`, new workloads have been created.
![maven workload](/images/devops/view-result-maven-workload.png)
You can view the access address of the Service, as shown below.
![maven service](/images/devops/view-result-maven-workload-svc.png)
## Summary
This document is not a getting-started guide. It introduces the configurations for building Maven projects on the KubeSphere DevOps platform and provides an example pipeline for a Maven project. You are free to add new steps to improve the pipeline.
![查看服务](/images/docs/zh-cn/devops-user-guide/examples/build-and-deploy-maven-project/view-result-maven-workload-svc.PNG)
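您也可以通过 kubectl 验证流水线创建的资源(仅作示意,假设当前 kubeconfig 指向目标集群):

```bash
kubectl -n kubesphere-sample-dev get deployments,services
```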

View File

@ -152,4 +152,4 @@ weight: 11420
保存 Jenkinsfile 后,点击**运行**。如果一切顺利,您会在您的多集群项目中看到部署 (Deployment) 工作负载。
![multi-cluster-ok](/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png)
![Deployment](/images/docs/zh-cn/devops-user-guide/examples/deploy-apps-in-multicluster-project-using-jenkinsfile/multi-cluster-ok.PNG)

View File

@ -1,130 +1,132 @@
---
title: "如何将 Harbor 集成到流水线"
keywords: 'kubernetes, docker, devops, jenkins, harbor'
description: ''
title: "将 Harbor 集成到流水线"
keywords: 'Kubernetes, Docker, DevOps, Jenkins, Harbor'
description: '如何将 Harbor 集成到流水线'
linkTitle: "将 Harbor 集成到流水线"
weight: 11320
---
## 先决条件
本教程演示如何将 Harbor 集成到 KubeSphere 流水线。
- 您需要[启用 KubeSphere DevOps 系统](../../../../docs/pluggable-components/devops/)。
- 您需要创建一个企业空间,一个 DevOps 项目和一个项目**常规帐户project-regular**,并且需要将此帐户邀请到 DevOps 项目中。 请参阅创建[企业空间和项目](../../../../docs/quick-start/create-workspace-and-project)。
- 您已经安装 **Harbor**
## 准备工作
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`)。需要邀请该帐户至 DevOps 工程并赋予 `operator` 角色。如果尚未创建,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
## 安装 Harbor
强烈建议您通过应用程序商店安装 Harbor。你也可以通过 helm3 手动安装 Harbor。
强烈建议您通过 [KubeSphere 应用商店](../../../application-store/built-in-apps/harbor-app/)安装 Harbor。或者您可以使用 Helm3 手动安装 Harbor。
```bash
helm repo add harbor https://helm.goharbor.io
# For a quick start, you can expose Harbor by NodePort and disable TLS.
# Set externalURL to one of your node IPs and make sure it can be accessed by Jenkins.
helm install harbor-release harbor/harbor --set expose.type=nodePort,externalURL=http://$ip:30002,expose.tls.enabled=false
```
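安装命令执行后,可以先确认 Harbor 相关 Pod 均处于 Running 状态,再访问控制台(仅作示意;假设未通过 `-n` 指定命名空间,即安装在 default 命名空间):

```bash
kubectl get pods -n default | grep harbor-release
```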
几分钟后,打开浏览器并访问 `http://$node_ip:30003`,输入 **admin** 和 **Harbor12345**,然后单击 **log in** 登录。
![harbor-login](/images/devops-zh/harbor-login.png)
单击**新建项目**,输入项目名称,然后单击**确定**。
## 获取 Harbor 凭证
![harbor-new-project](/images/devops-zh/harbor-new-project.png)
1. 安装 Harbor 后,请访问 `NodeIP:30002` 并使用默认帐户和密码 (`admin/Harbor12345`) 登录控制台。转到**项目**并点击**新建项目**。
![harbor-project-ok](/images/devops-zh/harbor-project-ok.png)
![Harbor 项目](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/harbor-projects.png)
单击您刚刚创建的项目名称,找到**机器人帐户**选项卡,然后单击**添加机器人帐户**。
2. 设置名称并点击**确定**。
![harbor-robot-account](/images/devops-zh/harbor-robot-account.png)
![设置名称](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/set-name.png)
输入机器人帐户的名称,然后保存
3. 点击刚刚创建的项目,在**机器人帐户**选项卡下选择**添加机器人帐户**
![harbor-robot-account-ok](/images/devops-zh/harbor-robot-account-ok.png)
![机器人帐户](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account.png)
单击**导出到文件中**以保存凭证
4. 输入机器人帐户的名称并保存
![harbor-robot-account-save](/images/devops-zh/harbor-robot-account-save.png)
![设置名称](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/robot-account-name.png)
### 创建凭证
5. 点击**导出到文件中**,保存该令牌。
登录到 KubeSphere,进入创建的 DevOps 项目,并在**工程管理** → **凭证**下创建以下**凭证**。
![导出到文件](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/export-to-file.png)
![ks-console-create-credential](/images/devops-zh/ks-console-create-credential.png)
## 创建凭证
用户名是您刚刚保存的 json 文件的 `name` 字段内容。 密码使用 `token` 字段内容
1. 以 `project-regular` 身份登录 KubeSphere 控制台,转到您的 DevOps 工程,在**工程管理**下的**凭证**页面为 Harbor 创建凭证
![ks-console-credential-ok](/images/devops-zh/ks-console-credential-ok.png)
![创建凭证](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/create-credentials.PNG)
2. 在**创建凭证**页面,设置凭证 ID**类型**选择**帐户凭证**。**用户名**字段必须和您刚刚下载的 Json 文件中 `name` 的值相同,并在 **token / 密码**中输入该文件中 `token` 的值。
![凭证页面](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/credentials-page.png)
3. 点击**确定**以保存。
## 创建流水线
![ks-console-create-pipline](/images/devops-zh/ks-console-create-pipline.png)
1. 转到**流水线**页面,点击**创建**。在弹出对话框中输入基本信息,然后点击**下一步**。
在弹出窗口中填写流水线的基本信息,输入流水线的名称,然后将其他名称设置为默认值。
![基本信息](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/basic-info.png)
![create-pipline-2](/images/devops-zh/create-pipline-2.png)
2. **高级设置**中使用默认值,点击**创建**
![create-pipline-3](/images/devops-zh/create-pipline-3.png)
![高级设置](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/advanced-settings.PNG)
## 编辑 Jenkinsfile
单击流水线下的**编辑 Jenkinsfile** 按钮,然后将以下文本粘贴到弹出窗口中。您需要替换环境变量 `REGISTRY`、`HARBOR_NAMESPACE`、`APP_NAME` 和 `HARBOR_CREDENTIAL` 的值。
1. 点击该流水线进入其详情页面,然后点击**编辑 Jenkinsfile**
![editJenkinsfile](/images/devops-zh/edit-Jenkinsfile.png)
![编辑 jenkinsfile](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-harbor-into-pipelines/edit-jenkinsfile.PNG)
2. 将以下内容复制粘贴至 Jenkinsfile。请注意您必须替换 `REGISTRY`、`HARBOR_NAMESPACE`、`APP_NAME` 和 `HARBOR_CREDENTIAL` 的值。
```groovy
pipeline {
agent {
node {
label 'maven'
}
}
environment {
// the address of your harbor registry
REGISTRY = '103.61.38.55:30002'
// the project name
// make sure your robot account have enough access to the project
HARBOR_NAMESPACE = 'ks-devops-harbor'
// docker image name
APP_NAME = 'docker-example'
// yuswift is the credential id you created on ks console
HARBOR_CREDENTIAL = credentials('yuswift')
}
stages {
stage('docker login') {
steps{
container ('maven') {
                // replace the Harbor robot account username behind -u and do not forget the single quotes ''.
sh '''echo $HARBOR_CREDENTIAL_PSW | docker login $REGISTRY -u 'robot$yuswift2018' --password-stdin'''
}
}
}
stage('build & push') {
steps {
container ('maven') {
sh 'git clone https://github.com/kstaken/dockerfile-examples.git'
sh 'cd dockerfile-examples/rethinkdb && docker build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test .'
sh 'docker push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:devops-test'
}
}
}
}
}
```
{{< notice note >}}
{{< notice note >}}
您可以通过带有环境变量的 jenkins 凭证将参数传递给 docker login -u。但是每个 harbor-robot-account 用户名都包含一个 “$” 字符当被环境变量使用时jenkins 将其转换为 “$$”。查看更多[相关信息](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/)。
您可以通过带有环境变量的 Jenkins 凭证来传送参数至 `docker login -u`。但是,每个 Harbor 机器人帐户的用户名都包含一个 `$` 字符当用于环境变量时Jenkins 会将其转换为 `$$`。[了解更多](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/)。
{{</ notice >}}
{{</ notice >}}
## 运行流水线
保存完 jenkinsfile 后,单击**运行**按钮。 如果一切顺利,您会看到 jenkins 将镜像推送到 Harbor 仓库中。
![run-pipline](/images/devops-zh/run-pipline.png)
保存该 JenkinsfileKubeSphere 会自动在图形编辑面板上创建所有阶段和步骤。点击**运行**来执行该流水线。如果一切运行正常Jenkins 将推送镜像至您的 Harbor 仓库。

View File

@ -1,220 +1,249 @@
---
title: "将 SonarQube 集成到流水线"
keywords: 'Kubernetes, KubeSphere, devops, jenkins, sonarqube, pipeline'
description: '本教程演示如何将 SonarQube 集成到流水线。'
keywords: 'Kubernetes, KubeSphere, devops, jenkins, sonarqube, 流水线'
description: '本教程演示如何将 SonarQube 集成到流水线。'
linkTitle: "将 SonarQube 集成到流水线"
weight: 11310
---
[SonarQube](https://www.sonarqube.org/) 是一种流行的代码质量持续监测工具。您可以将其用于代码库的静态和动态分析。将其集成到 KubeSphere 的流水线中后,当 SonarQube 在运行的流水线检测到问题时,您可以直接在仪表盘上查看常见的代码问题,比如 Bug 和漏洞。
本教程演示了如何将 SonarQube 集成到流水线中。 在[使用 Jenkinsfile 创建流水线](../../../devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile/)之前,请先参考以下步骤。
[SonarQube](https://www.sonarqube.org/) 是一种主流的代码质量持续检测工具。您可以将其用于代码库的静态和动态分析。SonarQube 集成到 KubeSphere 流水线后,如果在运行的流水线中检测到问题,您可以直接在仪表板上查看常见代码问题,例如 Bug 和漏洞。
## 先决条件
本教程演示如何将 SonarQube 集成到流水线中。在[使用 Jenkinsfile 创建流水线](../../../devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile/)之前,请先参考以下步骤。
您需要[启用 KubeSphere DevOps 系统](../../../../docs/pluggable-components/devops/)。
## 准备工作
## 安装 SonarQube 服务
您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
1. 如果尚未安装,请执行以下命令来安装 SonarQube 服务:
## 安装 SonarQube 服务器
```bash
helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main -n kubesphere-devops-system --create-namespace --set service.type=NodePort
```
要将 SonarQube 集成到您的流水线,必须先安装 SonarQube 服务器。
2. 您将得到以下提示
1. 请先安装 Helm以便后续使用该工具安装 SonarQube。例如运行以下命令安装 Helm 3
![sonarqube-install](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-install.png)
```bash
curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
```
查看 Helm 版本。
```bash
helm version
version.BuildInfo{Version:"v3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.11"}
```
{{< notice note >}}
有关更多信息,请参见 [Helm 文档](https://helm.sh/zh/docs/intro/install/)。
{{</ notice >}}
2. 执行以下命令安装 SonarQube 服务器。
```bash
helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main -n kubesphere-devops-system --create-namespace --set service.type=NodePort
```
{{< notice note >}}
请您确保使用 Helm 3 安装 SonarQube Server。
{{</ notice >}}
3. 您会获取以下提示内容:
![安装 SonarQube](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-install.png)
## 获取 SonarQube 控制台地址
1. 执行以下命令以获取 SonarQube 控制台地址。
1. 执行以下命令以获取 SonarQube NodePort
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services sonarqube-sonarqube)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
```
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services sonarqube-sonarqube)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
```
2. 您可以得到如下输出 (`31377` 是此示例中的端口号,可能与您的端口号不同):
2. 您可以获得如下输出(本示例中端口号为 `31377`,可能与您的端口号不同):
```bash
http://10.77.1.201:31377
```
```bash
http://10.77.1.201:31377
```
## 配置 SonarQube 服务器
### 步骤 1: 访问 SonarQube 控制台
### 步骤 1访问 SonarQube 控制台
1. 执行以下命令以查看 SonarQube 的状态。 请注意,只有在 SonarQube 启动并运行后才能访问 SonarQube 控制台。
1. 执行以下命令查看 SonarQube 的状态。请注意,只有在 SonarQube 启动并运行后才能访问 SonarQube 控制台。
```bash
$ kubectl get pod -n kubesphere-devops-system
NAME READY STATUS RESTARTS AGE
ks-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m
s2ioperator-0 1/1 Running 1 84m
sonarqube-postgresql-0 1/1 Running 0 5m31s
sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s
uc-jenkins-update-center-8c898f44f-m8dz2 1/1 Running 0 85m
```
```bash
$ kubectl get pod -n kubesphere-devops-system
NAME READY STATUS RESTARTS AGE
ks-jenkins-68b8949bb-7zwg4 1/1 Running 0 84m
s2ioperator-0 1/1 Running 1 84m
sonarqube-postgresql-0 1/1 Running 0 5m31s
sonarqube-sonarqube-bb595d88b-97594 1/1 Running 2 5m31s
uc-jenkins-update-center-8c898f44f-m8dz2 1/1 Running 0 85m
```
2. 在浏览器中访问 SonarQube 控制台 `http://10.77.1.201:31377`,您可以看到其主页,如下所示:
2. 在浏览器中访问 SonarQube 控制台 `http://{$Node IP}:{$NodePort}`,您可以看到其主页,如下所示:
![access-sonarqube-console](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/access-sonarqube-console.png)
![访问 SonarQube 控制台](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/access-sonarqube-console.png)
3. 单击右上角的 **Log in**,然后使用默认帐户 `admin/admin` 登录。
3. 点击右上角的 **Log in**,然后使用默认帐户 `admin/admin` 登录。
![log-in-page](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/log-in-page.png)
![登录页面](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/log-in-page.png)
{{< notice note >}}
{{< notice note >}}
您可能需要设置必要的端口转发规则并打开端口以访问安全组中的 SonarQube ,具体取决于实例的部署位置
取决于您的实例的部署位置,您可能需要设置必要的端口转发规则,并在您的安全组中放行该端口,以便访问 SonarQube
{{</ notice >}}
{{</ notice >}}
### 步骤 2: 创建 SonarQube 管理员 Token
### 步骤 2:创建 SonarQube 管理员令牌 (Token)
1. 单击右上角字母 **A**,然后从菜单中选择 **My Account** 以转到 **Profile** 页面。
1. 点击右上角字母 **A**,然后从菜单中选择 **My Account** 以转到 **Profile** 页面。
![sonarqube-config-1](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-config-1.png)
![SonarQube 配置-1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-1.png)
2. 单击 **Security** 并输入Token 名称,如 kubesphere
2. 点击 **Security** 并输入令牌名称,例如 `kubesphere`
![sonarqube-config-2](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-config-2.png)
![SonarQube 配置-2](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-2.png)
3. 单击 **Generate** 并复制 token
3. 点击 **Generate** 并复制此令牌
![sonarqube-config-3](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-config-3.png)
![SonarQube 配置-3](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-config-3.png)
{{< notice warning >}}
{{< notice warning >}}
如提示所示,请确保您确实复制了 token因为您将无法再次看到此 token
如提示所示,您无法再次查看此令牌,因此请确保复制成功
{{</ notice >}}
{{</ notice >}}
### 步骤 3: 创建一个 SonarQube Webhook 服务
### 步骤 3:创建 Webhook 服务器
1. 执行以下命令获取 SonarQube Webhook 的地址。
1. 执行以下命令获取 SonarQube Webhook 的地址。
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/
```
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/
```
2. 预期的输出结果:
2. 预期输出结果:
```bash
http://10.77.1.201:30180/sonarqube-webhook/
```
```bash
http://10.77.1.201:30180/sonarqube-webhook/
```
3. 依次单击 **Administration**、**Configuration** 和 **Webhooks**,创建一个 Webhook。
3. 依次点击 **Administration**、**Configuration** 和 **Webhooks** 创建一个 Webhook。
![sonarqube-webhook-1](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-webhook-1.png)
![SonarQube Webhook-1](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-webhook-1.png)
4. 点击 **Create**
![sonarqube-webhook-3](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-webhook-2.png)
![SonarQube Webhook-2](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-webhook-2.png)
5. 在出现的对话框中输入 **Name** 和 **Jenkins Console URL**(即 SonarQube Webhook 地址)。单击 **Create** 完成。
5. 在弹出对话框中输入 **Name****Jenkins Console URL**(即 SonarQube Webhook 地址)。点击 **Create** 完成操作
![webhook-page-info](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/webhook-page-info.png)
![Webhook 页面信息](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/webhook-page-info.png)
### 步骤 4: 将 SonarQube 配置添加到 ks-installer
### 步骤 4将 SonarQube 配置添加到 ks-installer
1. 执行以下命令编辑 `ks-installer`
```bash
kubectl edit cc -n kubesphere-system ks-installer
```
```bash
kubectl edit cc -n kubesphere-system ks-installer
```
2. 导航至 `devops`。 添加字段 `sonarqube` 并在其下指定 `externalSonarUrl``externalSonarToken`
2. 搜寻至 `devops`。添加字段 `sonarqube` 并在其下方指定 `externalSonarUrl``externalSonarToken`
```yaml
devops:
enabled: true
jenkinsJavaOpts_MaxRAM: 2g
jenkinsJavaOpts_Xms: 512m
jenkinsJavaOpts_Xmx: 512m
jenkinsMemoryLim: 2Gi
jenkinsMemoryReq: 1500Mi
jenkinsVolumeSize: 8Gi
sonarqube: # Add this field manually.
externalSonarUrl: http://10.77.1.201:31377 # The SonarQube IP address.
externalSonarToken: 00ee4c512fc987d3ec3251fdd7493193cdd3b91d # The SonarQube admin token created above.
```
```yaml
devops:
enabled: true
jenkinsJavaOpts_MaxRAM: 2g
jenkinsJavaOpts_Xms: 512m
jenkinsJavaOpts_Xmx: 512m
jenkinsMemoryLim: 2Gi
jenkinsMemoryReq: 1500Mi
jenkinsVolumeSize: 8Gi
sonarqube: # Add this field manually.
externalSonarUrl: http://10.77.1.201:31377 # The SonarQube IP address.
externalSonarToken: 00ee4c512fc987d3ec3251fdd7493193cdd3b91d # The SonarQube admin token created above.
```
3. 完成后保存文件。
3. 完成操作后保存文件。
### 步骤 5: 将 SonarQube Server 添加到 Jenkins
### 步骤 5:将 SonarQube 服务器添加至 Jenkins
1. 执行以下命令获取 Jenkins 的地址。
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
```
```bash
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services ks-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
```
2. 您会得到以下输出,获取 Jenkins 的端口号。
```bash
http://10.77.1.201:30180
```
```bash
http://10.77.1.201:30180
```
3. 使用地址 `http://10.77.1.201:30180` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。 Jenkins 配置了KubeSphere LDAP这意味着您可以直接使用 KubeSphere 帐户(例如 admin/P@88w0rd登录 Jenkins。 有关配置 Jenkins 的更多信息,请参阅 [Jenkins 系统设置](../../../devops-user-guide/how-to-use/jenkins-setting/)。
3. 请使用地址 `http://Public IP:30180` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。此外Jenkins 还配置有 KubeSphere LDAP这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。有关配置 Jenkins 的更多信息,请参见 [Jenkins 系统设置](../../../devops-user-guide/how-to-use/jenkins-setting/)。
![jenkins-login-page](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/jenkins-login-page.png)
![Jenkins 登录页面](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-login-page.png)
{{< notice note >}}
{{< notice note >}}
您可能需要设置必要的端口转发规则并打开端口 `30180` 才能访问安全组中的 Jenkins具体取决于您的实例部署的位置
取决于您的实例的部署位置,您可能需要设置必要的端口转发规则,并在您的安全组中放行端口 `30180`,以便访问 Jenkins
{{</ notice >}}
{{</ notice >}}
4. 单击左侧的 **Manage Jenkins**。
4. 点击左侧的 **Manage Jenkins**。
![manage-jenkins](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/manage-jenkins.png)
![管理 Jenkins](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/manage-jenkins.png)
5. 向下翻页找到并单击 **Configure System**。
5. 向下翻页找到并点击 **Configure System**。
![configure-system](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/configure-system.png)
![configure-system](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/configure-system.png)
6. 导航到 **SonarQube servers**,然后单击 **Add SonarQube**。
6. 搜寻到 **SonarQube servers**,然后点击 **Add SonarQube**。
![add-sonarqube](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/add-sonarqube.png)
![添加 SonarQube](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/add-sonarqube.png)
7. 输入 **Name**、**Server URL** (`http://10.77.1.201:31377`) 和 **Server authentication token**(SonarQube 管理员 token)。单击 **Apply** 完成。
7. 输入 **Name**、**Server URL** (`http://Node IP:port`) 和 **Server authentication token**(SonarQube 管理员令牌)。点击 **Apply** 完成操作。
![sonarqube-jenkins-settings](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-jenkins-settings.png)
![sonarqube-jenkins-settings](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-jenkins-settings.png)
### 步骤 6: 将 sonarqubeUrl 添加到 KubeSphere 控制台
### 步骤 6:将 sonarqubeURL 添加到 KubeSphere 控制台
您需要指定 `sonarqubeURL`,以便可以直接从 KubeSphere 控制台访问 SonarQube。
1. 执行以下命令:
```bash
kubectl edit cm -n kubesphere-system ks-console-config
```
```bash
kubectl edit cm -n kubesphere-system ks-console-config
```
2. 导航到 `client`,添加 `devops`字段,填写 `sonarqubeURL` 的值
2. 搜寻到 `client`,添加 `devops` 字段并指定 `sonarqubeURL`
```yaml
client:
version:
kubesphere: v3.0.0
kubernetes: v1.17.9
openpitrix: v0.3.5
enableKubeConfig: true
devops: # Add this field manually.
sonarqubeURL: http://10.77.1.201:31377 # The SonarQube IP address.
```
```yaml
client:
version:
kubesphere: v3.0.0
kubernetes: v1.17.9
openpitrix: v0.3.5
enableKubeConfig: true
devops: # Add this field manually.
sonarqubeURL: http://10.77.1.201:31377 # The SonarQube IP address.
```
3. 保存文件。
3. 保存文件。
### 步骤 7: 重新启动服务使所有功能生效
### 步骤 7:重启服务
执行以下命令
执行以下命令
```bash
kubectl -n kubesphere-system rollout restart deploy ks-apiserver
@ -224,32 +253,32 @@ kubectl -n kubesphere-system rollout restart deploy ks-apiserver
kubectl -n kubesphere-system rollout restart deploy ks-console
```
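可以使用以下命令确认重启是否完成(仅作示意):

```bash
kubectl -n kubesphere-system rollout status deploy ks-apiserver
kubectl -n kubesphere-system rollout status deploy ks-console
```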
## 为新项目创建 SonarQube Token
## 为新工程创建 SonarQube Token
您需要一个 SonarQube Token,以便您的流水线可以在运行时与 SonarQube 通信。
您需要一个 SonarQube 令牌,以便您的流水线可以在运行时与 SonarQube 通信。
1. 在 SonarQube 控制台上,单击 **Create new project**。
1. 在 SonarQube 控制台上,点击 **Create new project**。
![sonarqube-create-project](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-create-project.png)
![SonarQube 创建项目](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-create-project.png)
2. 输入项目密钥,例如 `java-demo`,然后单击 **Set Up**。
2. 输入工程密钥,例如 `java-demo`,然后点击 **Set Up**。
![jenkins-projet-key](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/jenkins-projet-key.png)
![Jenkins 项目密钥](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/jenkins-projet-key.png)
3. 输入项目名称,例如 `java-sample`,然后单击 **Generate**。
3. 输入工程名称,例如 `java-sample`,然后点击 **Generate**。
![generate-a-token](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/generate-a-token.png)
![创建令牌](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/generate-a-token.png)
4. 创建令牌后,单击 **Continue**。
4. 创建令牌后,点击 **Continue**。
![token-created](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/token-created.png)
![令牌已创建](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/token-created.png)
5. 分别选择 **Java** 和 **Maven**。复制下图中绿色框中的序列号,如果要在流水线中使用,则需要在[凭据](../../../devops-user-guide/how-to-use/credential-management/#create-credentials)中添加此序列号。
5. 分别选择 **Java** 和 **Maven**。复制下图所示绿色框中的序列号,如果要在流水线中使用,则需要在[凭证](../../../devops-user-guide/how-to-use/credential-management/#创建凭证)中添加此序列号。
![sonarqube-example](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-example.png)
![sonarqube-example](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-example.png)
## 在 KubeSphere 控制台查看结果
使用[图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel)或[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile)之后,可以查看代码质量分析的结果。 如果 SonarQube 成功运行,您可能会看到下图所示结果。
您[使用图形编辑面板创建流水线](../../how-to-use/create-a-pipeline-using-graphical-editing-panel)或[使用 Jenkinsfile 创建流水线](../../how-to-use/create-a-pipeline-using-jenkinsfile)之后,可以查看代码质量分析的结果。如果 SonarQube 成功运行,您可能会看到下图所示结果。
![sonarqube-view-result](/images/docs/devops-user-guide-zh/integrate-sonarqube-into-pipeline-zh/sonarqube-view-result.png)
![sonarqube-view-result](/images/docs/zh-cn/devops-user-guide/tool-integration/integrate-sonarqube-into-pipelines/sonarqube-view-result.png)

View File

@ -1,18 +1,18 @@
---
title: "选择 Jenkins Agent"
keywords: 'Kubernetes, KubeSphere, docker, devops, jenkins, agent'
description: '本教程介绍 Jenkins Agent 和 KubeSphere 中的内置 podTemplates。'
keywords: 'Kubernetes, KubeSphere, Docker, DevOps, Jenkins, Agent'
description: '本教程介绍 Jenkins Agent 和 KubeSphere 中的内置 podTemplates。'
linkTitle: "选择 Jenkins Agent"
weight: 11250
---
`Agent` 部分指定整个流水线或特定阶段将在 Jenkins 环境中执行的位置,具体取决于 Agent 部分的放置位置。该部分必须在流水线的顶层定义,但是阶段级别的用法是可选的。有关更多信息,请参阅 [Jenkins 的官方文档。](https://www.jenkins.io/doc/book/pipeline/syntax/#agent)
`agent` 部分指定整个流水线或特定阶段 (Stage) 将在 Jenkins 环境中执行的位置,具体取决于`agent` 部分的放置位置。该部分必须在 `pipeline` 块的顶层进行定义,但是阶段级别的使用为可选。有关更多信息,请参见 [Jenkins 官方文档](https://www.jenkins.io/zh/doc/book/pipeline/syntax/#代理)。
## 内置 podTemplate
## 内置 podTemplate
podTemplate 是用于创建 agent Pod 的模板。 用户可以定义在 Kubernetes 插件中使用的 podTemplate。
podTemplate 是一种 Pod 模板,该 Pod 用于创建 Agent。用户可以定义在 Kubernetes 插件中使用的 podTemplate。
当流水线运行时,每个 Jenkins agent Pod 必须具有一个名为 `jnlp` 的容器用以在 Jenkins 主服务器和 Jenkins agent 之间进行通信。 另外,用户可以在 podTemplate 中添加容器以满足自己的需求。 他们可以选择使用自己的 Pod YAML 来灵活地控制 runtime并且可以通过 `container` 命令来切换容器。 请看下面的例子
当流水线运行时,每个 Jenkins Agent Pod 必须具有一个名为 `jnlp` 的容器,用于 Jenkins Master 和 Jenkins Agent 之间进行通信。另外,用户可以在 podTemplate 中添加容器以满足自己的需求。用户可以选择使用自己的 Pod YAML 来灵活地控制运行时环境 (Runtime),并且可以通过 `container` 命令来切换容器。请参见以下示例
```groovy
pipeline {
@ -44,13 +44,13 @@ spec:
}
```
同时为了减少降低用户的使用成本KubeSphere 内置了一些 podTemplate使用户可以避免 YAML 文件的编写
同时KubeSphere 内置了一些 podTemplate用户无需编写 YAML 文件,极大降低学习成本
在目前版本当中 KubeSphere 内置了 4 种类型的 podTemplate : `base`、`nodejs`、`maven`、`go`,并且在 Pod 中提供了隔离的 Docker 环境。
在目前版本KubeSphere 内置了 4 种类型的 podTemplate`base`、`nodejs`、`maven` 和 `go`,并且在 Pod 中提供隔离的 Docker 环境。
可以通过指定 Agent 的 label 使用内置的 podTemplate,例如要使用 nodejs 的 podTemplate,可以在创建流水线时指定 label 为 `nodejs`,如下给出示例。
您可以通过指定 Agent 的标签来使用内置 podTemplate。例如,要使用 nodejs 的 podTemplate,您可以在创建流水线时指定标签为 `nodejs`,具体参见以下示例。
![jenkins-agent](/images/docs/devops-user-guide/using-devops/jenkins-agent/jenkins-agent.jpg)
![Jenkins Agent](/images/docs/zh-cn/devops-user-guide/use-devops/choose-jenkins-agent/jenkins-agent.PNG)
```groovy
pipeline {
@ -79,56 +79,56 @@ pipeline {
| 名称 | 类型 / 版本 |
| --- | --- |
|Jenkins Agent Label | base |
| 容器名称 | base |
|Jenkins Agent 标签 | base |
|容器名称 | base |
| 操作系统 | centos-7 |
|Docker| 18.06.0|
|Helm | 2.11.0 |
|Kubectl| Stable release|
|内置工具 | unzip, which, make, wget, zip, bzip2, git |
|Kubectl| 稳定版 |
|内置工具 | unzip、which、make、wget、zip、bzip2、git |
### podTemplate nodejs
| 名称 | 类型 / 版本 |
| --- | --- |
|Jenkins Agent Label | nodejs |
| 容器名称 | nodejs |
|Jenkins Agent 标签 | nodejs |
|容器名称 | nodejs |
| 操作系统 | centos-7 |
|Node | 9.11.2 |
|Yarn | 1.3.2 |
| Docker | 18.06.0 |
| Helm | 2.11.0 |
|Kubectl | Stable release|
|内置工具| unzip, which, make, wget, zip, bzip2, git |
|Kubectl | 稳定版 |
|内置工具| unzip、which、make、wget、zip、bzip2、git |
### podTemplate maven
| 名称 | 类型 / 版本 |
| --- | --- |
| Jenkins Agent Label | maven |
| Jenkins Agent 标签 | maven |
| 容器名称 | maven |
| 操作系统| centos-7 |
| 操作系统 | centos-7 |
| Jdk | openjdk-1.8.0 |
| Maven | 3.5.3|
| Docker| 18.06.0 |
| Helm | 2.11.0 |
| Kubectl| Stable release |
| 内置工具 | unzip, which, make, wget, zip, bzip2, git |
| Kubectl| 稳定版 |
| 内置工具 | unzip、which、make、wget、zip、bzip2、git |
### podTemplate go
| 名称 | 类型 / 版本 |
| --- | --- |
| Jenkins Agent Label | go |
| Jenkins Agent 标签 | go |
| 容器名称 | go |
| 操作系统| centos-7 |
| 操作系统 | centos-7 |
| Go | 1.11 |
| GOPATH | /home/jenkins/go |
| GOROOT | /usr/local/go |
| Docker | 18.06.0 |
| Helm | 2.11.0 |
| Kubectl | Stable release |
| 内置工具 | unzip, which, make, wget, zip, bzip2, git |
| Kubectl | 稳定版 |
| 内置工具 | unzip、which、make、wget、zip、bzip2、git |
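例如,下面是一个使用内置 `go` podTemplate 的最小流水线示意(仅供参考):

```groovy
pipeline {
    agent {
        node {
            label 'go'
        }
    }
    stages {
        stage ('build') {
            steps {
                container ('go') {
                    sh 'go version'
                }
            }
        }
    }
}
```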

View File

@ -1,230 +1,232 @@
---
title: "使用 Jenkinsfile 创建流水线"
keywords: 'KubeSphere, Kubernetes, docker, spring boot, Jenkins, devops, ci/cd, pipeline'
keywords: 'KubeSphere, Kubernetes, Docker, Spring Boot, Jenkins, DevOps, CI/CD, 流水线'
description: "如何使用 Jenkinsfile 创建流水线。"
linkTitle: "使用 Jenkinsfile 创建流水线"
weight: 11210
---
Jenkinsfile 是一个文本文件,它包含了 Jenkins 流水线的定义并被检入源代码控制仓库。由于将整个工作流存储为代码,因此它是代码审查和流水线迭代过程的基础。有关更多信息,请参阅 [Jenkins官方文档](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/)。
Jenkinsfile 是一个文本文件,它包含 Jenkins 流水线的定义并被检入源代码控制仓库。Jenkinsfile 将整个工作流存储为代码,因此它是代码审查和流水线迭代过程的基础。有关更多信息,请参见 [Jenkins 官方文档](https://www.jenkins.io/zh/doc/book/pipeline/jenkinsfile/)。
本教程演示如何基于 GitHub 仓库中的 Jenkinsfile 创建流水线。 使用流水线,您可以将示例应用程序分别部署到可从外部访问的开发环境和生产环境。
本教程演示如何基于 GitHub 仓库中的 Jenkinsfile 创建流水线。您可以使用该流水线将示例应用程序分别部署到可从外部访问的开发环境和生产环境。
{{< notice note >}}
在 KubeSphere 中可以创建两种类型的流水线: 本教程中介绍了基于 SCM 中的 Jenkinsfile 创建的流水线和通过[图形编辑面板创建的流水线](../create-a-pipeline-using-graphical-editing-panel)。Jenkinsfile in SCM 意为将 Jenkinsfile 文件本身作为源代码管理 (Source Control Management) 的一部分KubeSphere 根据该文件内的流水线配置信息快速构建工程内的 CI/CD 功能模块,比如阶段 (Stage),步骤 (Step) 和任务 (Job)。因此,在代码仓库中应包含 Jenkinsfile。
{{</ notice >}}
KubeSphere 中可以创建两种类型的流水线:一种是本教程中介绍的基于 SCM 中 Jenkinsfile 创建的流水线,另一种是[通过图形编辑面板创建的流水线](../create-a-pipeline-using-graphical-editing-panel)。Jenkinsfile in SCM 需要源代码管理 (SCM) 中有内置 Jenkinsfile换句话说Jenkinsfile 作为 SCM 的一部分。KubeSphere DevOps 系统会根据代码仓库的现有 Jenkinsfile 自动构建 CI/CD 流水线。您可以定义工作流,例如 `stage``step`
## 先决条件
{{</ notice >}}
- 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户和一个 [GitHub](https://github.com/)帐户。
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要创建一个企业空间和一个具有项目管理 (`project-admin`) 权限的帐户,该账户必须是被赋予企业空间普通用户角色。如果还没准备好,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/) 。
- 您需要为运行流水线设置 CI 专用节点。请参阅[为缓存依赖项设置 CI 节点](../../how-to-use/set-ci-node/).
- 您需要安装和配置 SonarQube。 请参阅[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。 如果您跳过这一部分,则下面没有**SonarQube分析**。
## 准备工作
## 流水线概览
- 您需要有一个 [Docker Hub](https://hub.docker.com/) 帐户和一个 [GitHub](https://github.com/) 帐户。
- 您需要[启用 KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要创建一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`),需要邀请该帐户至 DevOps 工程中并赋予 `operator` 角色。如果尚未准备就绪,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
- 您需要设置 CI 专用节点用于运行流水线。请参考[为依赖项缓存设置 CI 节点](../../how-to-use/set-ci-node/)。
- 您需要安装和配置 SonarQube。请参考[将 SonarQube 集成到流水线](../../../devops-user-guide/how-to-integrate/sonarqube/)。如果您跳过这一部分,则没有下面的 **SonarQube 分析**阶段。
在此示例流水线中,共有八个阶段,下面的流程图简单说明了流水线完整的工作过程:
## 流水线概述
![Pipeline Overview](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/Pipeline-Overview.png)
本示例流水线包括以下八个阶段。
![流水线概览](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-overview.png)
{{< notice note >}}
- **阶段 1. Checkout SCM**: 拉取 GitHub 仓库代码。
- **阶段 2. Unit test**: 单元测试,如果测试通过了才继续下面的任务
- **阶段 3. SonarQube 质量检测**: sonarQube 代码质量检测
- **阶段 4.** **Build & push snapshot image**: 根据行为策略中所选择分支来构建镜像,并将 tag 为 SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER 推送至 Harbor (其中 $BUILD_NUMBER 为 pipeline 活动列表的运行序号)
- **阶段 5. Push the latest image**: 将 master 分支打上 tag 为 `latest`,并推送至 DockerHub。
- **阶段 6. Deploy to dev**: 将 master 分支部署到 Dev 环境,此阶段需要审核。
- **阶段 7. Push with tag**: 生成 tag 并 release 到 GitHub并推送到 DockerHub。
- **阶段 8. Deploy to production**: 将发布的 tag 部署到 Production 环境。
- **阶段 1Checkout SCM**:从 GitHub 仓库检出源代码。
- **阶段 2:单元测试**:待该测试通过后才会进行下一阶段
- **阶段 3SonarQube 分析**SonarQube 代码质量分析
- **阶段 4:构建并推送快照镜像**:根据**行为策略**中选定的分支来构建镜像,并将 `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` 标签推送至 Docker Hub其中 `$BUILD_NUMBER` 为流水线活动列表中的运行序号
- **阶段 5:推送最新镜像**:将 SonarQube 分支标记为 `latest`,并推送至 Docker Hub。
- **阶段 6:部署至开发环境**:将 SonarQube 分支部署到开发环境,此阶段需要审核。
- **阶段 7:带标签推送**:生成标签并发布到 GitHub该标签会推送到 Docker Hub。
- **阶段 8:部署至生产环境**:将已发布的标签部署到生产环境。
{{</ notice >}}
{{</ notice >}}
## 动手实验
### 步骤 1: 创建凭证
### 步骤 1创建凭证
1. 用项目普通用户 (`project-regular`) 登陆 KubeSphere 控制台。转到您的 DevOps 项目,然后在**工程管理**下的**凭证**中创建以下凭据。 有关如何创建凭证的更多信息,请参阅[凭证管理](../../../devops-user-guide/how-to-use/credential-management/).
1. `project-regular` 身份登录 KubeSphere 控制台。转到您的 DevOps 工程,在**工程管理**下的**凭证**页面创建以下凭证。有关如何创建凭证的更多信息,请参见[凭证管理](../../../devops-user-guide/how-to-use/credential-management/)。
{{< notice note >}}
如果您的帐户或密码中包含任何特殊字符,例如 `@``$`它们可能会在流水线运行时导致错误,因为它们可能无法识别。 在这种情况下,您需要先在某些第三方网站(例如 [urlencoder](https://www.urlencoder.org/) 上对帐户或密码进行编码。 之后,复制并粘贴输出以获取您的凭证信息。
如果您的帐户或密码中包含任何特殊字符,例如 `@``$`可能会因为无法识别而在流水线运行时导致错误。在这种情况下,您需要先在一些第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。
{{</ notice >}}
{{</ notice >}}
| 凭证 ID | 类型 | 用途 |
| --------------- | ------------------- | ------------ |
| dockerhub-id | 账户凭证 | Docker Hub |
| github-id | 账户凭证 | GitHub |
| demo-kubeconfig | kubeconfig | Kubernetes |
| 凭证 ID | 类型 | 用途 |
| --------------- | ---------- | ---------- |
| dockerhub-id | 帐户凭证 | Docker Hub |
| github-id | 帐户凭证 | GitHub |
| demo-kubeconfig | kubeconfig | Kubernetes |
2. 您需要为 SonarQube 创建一个附加的凭证 ID(`sonar-token`),该 ID 在上述第 3 阶段(SonarQube 分析)中使用。请参阅[为新项目创建 SonarQube Token](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project),将 Token 填入以下 `token / 密码` 字段。单击**确定**完成。
2. 您还需要为 SonarQube 创建一个凭证 ID (`sonar-token`),用于上述的阶段 3SonarQube 分析)。请参考[为新工程创建 SonarQube 令牌 (Token)](../../../devops-user-guide/how-to-integrate/sonarqube/#为新工程创建-sonarqube-token),在下图所示的**密钥**字段中输入令牌。点击**确定**完成操作
![sonar-token](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/sonar-token.png)
![Sonar 令牌](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonar-token.PNG)
3. 列表中总共有四个凭证。
3. 您可以在列表中看到已创建的四个凭证。
![credential-list](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/credential-list.png)
![凭证列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/credential-list.PNG)
### 步骤 2: 在 GitHub 仓库库中修改 Jenkinsfile
### 步骤 2:在 GitHub 仓库中修改 Jenkinsfile
1. 登录GitHub。 从 GitHub 仓库中将 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) fork 到您自己的 GitHub 帐户。
1. 登录 GitHub 并 Fork GitHub 仓库 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 至您的 GitHub 个人帐户。
![fork-github-repo](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/fork-github-repo.png)
![Fork GitHub 仓库](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.PNG)
2. 在您自己的 GitHub 仓库 **devops-java-sample** 中,单击根目录中的文件 `Jenkinsfile-online`。
2. 在您自己的 GitHub 仓库 **devops-java-sample** 中,点击根目录中的文件 `Jenkinsfile-online`。
![jenkins-edit-1](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/jenkins-edit-1.png)
![编辑 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.PNG)
3. 单击右侧的编辑图标编辑环境变量。
3. 点击右侧的编辑图标,编辑环境变量。
![jenkins-edit-2](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/jenkins-edit-2.png)
![编辑 Jenkinsfile](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.PNG)
| 修改项 | 值 | 含义 |
| 条目 | 值 | 描述信息 |
| :--- | :--- | :--- |
| DOCKER\_CREDENTIAL\_ID | dockerhub-id | 填写创建凭证步骤中 DockerHub **凭证 ID**,用于登录您的 DockerHub |
| GITHUB\_CREDENTIAL\_ID | github-id | 您在 KubeSphere 中为 GitHub 帐户设置的**凭证 ID**。 它用于将标签推送到您的 GitHub 仓库。 |
| KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | 您在 KubeSphere 中为 kubeconfig 设置的**凭证 ID**。 它用于访问运行中的 Kubernetes 集群。 |
| REGISTRY | docker.io | 默认为 **docker.io**,用作推送镜像的地址。 |
| DOCKERHUB\_NAMESPACE | your-dockerhub-account | 替换为您的 DockerHub 账号名(它也可以是账户下的 Organization 名称) |
| GITHUB\_ACCOUNT | your-github-account | 替换为您的 GitHub 账号名,例如 地址是 `https://github.com/kubesphere/`则填写 `kubesphere` (它也可以是账户下的 Organization 名称) |
| APP\_NAME | devops-java-sample | 应用名称 |
| SONAR\_CREDENTIAL\_ID | sonar-token | 填写创建凭证步骤中的 SonarQube token **凭证 ID**,用于代码质量检测 |
| DOCKER\_CREDENTIAL\_ID | dockerhub-id | 您在 KubeSphere 中为 Docker Hub 帐户设置的**凭证 ID**。 |
| GITHUB\_CREDENTIAL\_ID | github-id | 您在 KubeSphere 中为 GitHub 帐户设置的**凭证 ID**,用于将标签推送至您的 GitHub 仓库。 |
| KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | 您在 KubeSphere 中为 kubeconfig 设置的**凭证 ID**用于访问运行中的 Kubernetes 集群。 |
| REGISTRY | docker.io | 默认为 `docker.io`,用作推送镜像的地址。 |
| DOCKERHUB\_NAMESPACE | your-dockerhub-account | 请替换为您的 Docker Hub 帐户名,也可以替换为该帐户下的 Organization 名称。 |
| GITHUB\_ACCOUNT | your-github-account | 请替换为您的 GitHub 帐户名。例如,如果您的 GitHub 地址是 `https://github.com/kubesphere/`,则您的 GitHub 帐户名为 `kubesphere`,也可以替换为该帐户下的 Organization 名称。 |
| APP\_NAME | devops-java-sample | 应用名称 |
| SONAR\_CREDENTIAL\_ID | sonar-token | 您在 KubeSphere 中为 SonarQube 令牌设置的**凭证 ID**,用于代码质量检测。 |
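上表中的条目对应 `Jenkinsfile-online` 中 `environment` 代码块里的环境变量,大致如下(仅为示意,具体请以仓库中的实际文件为准):

```groovy
environment {
    DOCKER_CREDENTIAL_ID = 'dockerhub-id'
    GITHUB_CREDENTIAL_ID = 'github-id'
    KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
    REGISTRY = 'docker.io'
    DOCKERHUB_NAMESPACE = 'your-dockerhub-account'
    GITHUB_ACCOUNT = 'your-github-account'
    APP_NAME = 'devops-java-sample'
    SONAR_CREDENTIAL_ID = 'sonar-token'
}
```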
{{< notice note >}}
Jenkinsfile 中 `mvn` 命令的参数 `-o` 表示开启离线模式。本教程中已下载相关依赖项,以节省时间并适应某些环境中的网络干扰。离线模式默认开启。
{{</ notice >}}
Jenkinsfile 中 mvn 命令的参数 -o表示开启离线模式。本教程中已经下载了相关的依存关系以节省时间并适应某些环境中的网络干扰。 离线模式默认情况下处于启用状态。
4. 编辑环境变量后,点击页面底部的 **Commit changes**,更新 SonarQube 分支中的文件
{{</ notice >}}
![提交更改](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/commit-changes.PNG)
4. 编辑环境变量后,单击页面底部的 **Commit changes**,这将更新 SonarQube 分支中的文件。
### 步骤 3创建项目
![commit-changes](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/commit-changes.png)
### 步骤 3: 创建项目
您需要创建两个项目,例如`kubesphere-sample-dev` 和 `kubesphere-sample-prod`,分别代表开发环境和生产环境。 一旦流水线成功运行,将在这两个项目中自动创建应用程序的相关部署和服务。
您需要创建两个项目,例如 `kubesphere-sample-dev``kubesphere-sample-prod`,分别代表开发环境和生产环境。待流水线成功运行,将在这两个项目中自动创建应用程序的相关部署 (Deployment) 和服务 (Service)。
{{< notice note >}}
帐户 `project-admin` 需要提前创建,因为它是 CI/CD 流水线的审核者。 有关更多信息,请参见[创建工作区,项目,帐户和角色](../../../quick-start/create-workspace-and-project/)。
您需要提前创建 `project-admin` 帐户,用作 CI/CD 流水线的审核者。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
{{</ notice >}}
1. 使用 `project-admin` 帐户登录 KubeSphere。在相同的企业空间 (workspace) 下创建以下两个项目。确保 `project-regular` 帐户以`项目维护者`角色被邀请到这两个项目中。
1. `project-admin` 身份登录 KubeSphere。在您创建 DevOps 工程的企业空间中创建以下两个项目。请确保邀请 `project-regular` 帐户至这两个项目中并赋予 `operator` 角色
| 项目名称 | 别名 |
| 项目名称 | 别名 |
| ---------------------- | ----------------------- |
| kubesphere-sample-dev | development environment |
| kubesphere-sample-prod | production environment |
2. 检查项目列表。 您有两个项目和一个DevOps项目,如下所示:
2. 项目创建后,会显示在项目列表中,如下所示:
![project-list](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/project-list.png)
![项目列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/project-list.PNG)
### 步骤 4: 创建流水线
### 步骤 4创建流水线
1. 注销登录 KubeSphere,然后用 `project-regular` 帐户重新登录,跳转到 DevOps 工程 `demo-devops`,然后单击**创建**构建新流水线。
1. 登出 KubeSphere然后以 `project-regular` 身份重新登录,转到 DevOps 工程 `demo-devops`,点击**创建**构建新流水线。
![create-pipeline](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/create-pipeline.png)
![创建流水线](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline.PNG)
2. 在出现的对话框中填入基本信息。 将其命名为 `jenkinsfile-in-scm` 并选择一个代码存储库。
2. 在弹出对话框中填入基本信息,将其命名为 `jenkinsfile-in-scm` 并选择代码仓库。
![create-pipeline-2](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/create-pipeline-2.png)
![创建流水线-2](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.PNG)
3. 如果您没有 GitHub Token ,在 **GitHub** 选项卡中,单击 **Get Token** 生成一个新的 GitHub Token。 将 Token 粘贴到框中,然后单击**确认**。
3. 如果您没有 GitHub 令牌,请在 **GitHub** 选项卡中点击**获取 Token** 生成一个新的 GitHub 令牌。将令牌粘贴到文本框中,点击**确认**。
![generate-github-token-1](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/generate-github-token-1.png)
![生成 GitHub 令牌-1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.PNG)
![generate-github-token-2](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/generate-github-token-2.png)
![生成 GitHub 令牌-2](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.PNG)
4. 选择您的 GitHub 帐户。 与该 token 相关的所有仓库将在右侧列出。 选择 **devops-java-sample** 并单击 **Select this repo**。 单击**下一步**继续。
4. 选择您的 GitHub 帐户,与该令牌相关的所有仓库将在右侧列出。选择 **devops-java-sample** 并点击**选择此仓库**,点击**下一步**继续。
![select-repo](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/select-repo.png)
![选择仓库](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/select-repo.PNG)
5. 在**高级设置**中,选中**丢弃旧的分支**旁边的方框。在本教程中,参数**保留分支的天数**和**保留分支的最大个数**可以使用默认值。
5. 在**高级设置**中,选中**丢弃旧的分支**旁边的方框。本教程中,您可以为**保留分支的天数**和**保留分支的最大个数**使用默认
![branch-settings](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/branch-settings.png)
![分支设置](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/branch-settings.PNG)
丢弃旧分支意味着您将一起丢弃分支记录。 分支记录包括控制台输出,已归档工件以及特定分支的其他相关元数据。 更少的分支意味着您可以节省 Jenkins 正在使用的磁盘空间。 KubeSphere 提供两个选项来确定何时丢弃旧分支:
丢弃旧的分支意味着您将一并丢弃分支记录。分支记录包括控制台输出、已归档制品以及特定分支的其他相关元数据。更少的分支意味着您可以节省 Jenkins 正在使用的磁盘空间。KubeSphere 提供两个选项来确定何时丢弃旧分支:
- 保留分支的天数:在一定天数之后,分支将被丢弃。
- 保留分支的天数:在一定天数之后,丢弃分支
- 保留分支的最大个数:分支达到一定数量后,最旧的分支将被丢弃
- 保留分支的最大个数:分支达到一定数量后,丢弃最旧的分支。
{{< notice note >}}
**保留分支的天数**和**保留分支的最大个数**可以同时应用于分支。只要某个分支满足其中一个字段所设置的条件,则会丢弃该分支。例如,如果您将保留天数和最大分支数分别指定为 2 和 3待某个分支的保留天数超过 2 或者分支保留数量超过 3则会丢弃该分支。KubeSphere 默认用 -1 预填充这两个字段,表示已删除的分支将被丢弃。
{{</ notice >}}
**保留分支的天数**和**保留分支的最大个数**可以同时应用于分支。只要某个分支满足其中任意一个设置的条件,则将丢弃该分支。假设设置的保留天数和个数为 2 和 3,则分支的保留天数一旦超过 2 或者保留个数超过 3,则将丢弃该分支。默认两个值为 -1,表示将会丢弃已经被删除的分支。
6. 在**行为策略**中KubeSphere 默认提供三种策略。本示例中不会使用**从 Fork 仓库中发现 PR** 这条策略,因此您可以删除该策略。您无需修改设置,可以直接使用默认值
{{</ notice >}}
![删除行为策略](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.PNG)
6. 在**行为策略**中默认情况下KubeSphere 提供三种策略。 由于本示例还未用到 **从 Fork 仓库中发现 PR** 这条策略,此处可以删除该策略,点击右侧删除按钮删除即可。
![remove-behavioral-strategy](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/remove-behavioral-strategy.png)
在 Jenkins 流水线被触发时,开发者提交的 PRPull Request也将被视为一个单独的分支。
Jenkins 流水线运行时,开发者提交的 Pull Request (PR) 也将被视为一个单独的分支。
**发现分支**
- **排除也作为 PR 提交的分支**. 选择此项表示 CI 将不会扫描源分支 (比如 Origin 的 master branch),也就是需要被 merge 的分支。
- **只有被提交为 PR 的分支**. 仅扫描 PR 分支。
- **所有分支** 拉取的仓库 (origin) 中所有的分支。
- **排除也作为 PR 提交的分支**:不扫描源分支,例如源仓库的 master 分支。需要合并这些分支。
- **只有被提交为 PR 的分支**仅扫描 PR 分支。
- **所有分支**:拉取源仓库中的所有分支。
**从原仓库中发现 PR**
- **PR 与目标分支合并后的源代码版本**. PR 合并到目标分支后,将基于源代码创建并运行流水线。
- **PR 本身的源代码版本**. 根据 PR 本身的源代码创建并运行流水线。
- **发现PR时会创建两个流水线**. KubeSphere 创建两个流水线,一个基于 PR 合并到目标分支后的源代码,另一个基于 PR 本身的源代码
- **PR 与目标分支合并后的源代码版本**PR 合并到目标分支后,基于源代码创建并运行流水线。
- **PR 本身的源代码版本**根据 PR 本身的源代码创建并运行流水线。
- **发现 PR 时会创建两个流水线**KubeSphere 创建两个流水线,一个流水线使用 PR 与目标分支合并后的源代码版本,另一个使用 PR 本身的源代码版本
7. 向下滚动到**脚本路径**。 该字段指定代码仓库中的 Jenkinsfile 路径。 它指示存储库的根目录。 如果文件位置更改,则脚本路径也需要更改。 请将其更改为 Jenkinsfile-online这是位于根目录中的示例仓库中 Jenkinsfile 的文件名。
7. 向下滚动到**脚本路径**。该字段指定代码仓库中的 Jenkinsfile 路径。它表示仓库的根目录。如果文件位置变更,则脚本路径也需要更改。请将其更改为 `Jenkinsfile-online`,这是示例仓库中位于根目录下的 Jenkinsfile 的文件名。
![jenkinsfile-online](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/jenkinsfile-online.png)
![Jenkinsfile-online](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.PNG)
8. 在**扫描 Repo Trigger** 中,单击**如果没有扫描触发,则定期扫描**,将间隔设置为 **5 分钟**。单击**创建**完成配置。
8. 在**扫描 Repo Trigger** 中,点击**如果没有扫描触发,则定期扫描**并设置时间间隔为 **5 分钟**。点击**创建**完成配置。
![advanced-setting](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/advanced-setting.png)
![高级设置](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/advanced-settings.PNG)
{{< notice note >}}
您可以设置特定的时间间隔以允许流水线周期性地扫描远程仓库,这样就可以根据您在**行为策略**中设置的策略检测仓库有没有代码更新或新的 PR。
您可以设置特定的时间间隔让流水线扫描远程仓库,以便根据您在**行为策略**中设置的策略来检测代码更新或新的 PR。
{{</ notice >}}
### 步骤 5: 运行流水线
### 步骤 5运行流水线
1. 创建流水线后,它将显示在下面的列表中。 单击它转到其详细信息页面。
1. 流水线创建后,将显示在下图所示的列表中。点击该流水线进入其详情页面。
![pipeline-list](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/pipeline-list.png)
![流水线列表](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-list.PNG)
2. 在**活动**选项卡下, 三个分支正在扫描中。 单击右侧的**运行**,流水线将根据您设置的行为策略运行。 从下拉列表中选择 **sonarqube**,然后添加标签号,例如 v0.0.2。 单击**确定**触发新活动。
2. 在**活动**选项卡下,正在扫描三个分支。点击右侧的**运行**,流水线将根据您设置的行为策略运行。从下拉列表中选择 **sonarqube**,然后添加标签号,例如 `v0.0.2`。点击**确定**触发新活动。
![pipeline-detail](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/pipeline-detail.png)
![流水线详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.PNG)
![tag-name](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/tag-name.png)
![标签名称](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/tag-name.PNG)
{{< notice note >}}
- 如果您在此页面上未看到任何活动,则需要手动刷新浏览器或点击下拉菜单(**更多操作**按钮)中的**扫描远程分支**。
- 标签名称用于在 GitHub 和 Docker Hub 中指代新生成的发布版本和镜像。现有标签名称不能再次用于字段 `TAG_NAME`。否则,流水线将无法成功运行。
{{</ notice >}}
3. 稍等片刻,您会看到一些活动停止,一些活动失败。点击第一个活动查看其详细信息。
![活动失败](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/activity-failure.PNG)
{{< notice note >}}
- 如果您未在此页面上看到任何活动,则需要手动刷新浏览器或从下拉菜单中单击**扫描仓库**(**更多操作**按钮)。
- 标签名称用于在 GitHub 和 Docker Hub 中使用标签生成发布和镜像。 现有标签名称不能再次用于字段 TAG_NAME。 否则,流水线将无法成功运行。
活动失败可能由不同因素所引起。本示例中,在上述步骤中编辑分支环境变量时,仅更改了 sonarqube 分支的 Jenkinsfile。相反地dependency 和 master 分支中的这些变量保持不变(使用了错误的 GitHub 和 Docker Hub 帐户从而导致失败。您可以点击该活动查看其日志中的详细信息。导致失败的其他原因可能是网络问题、Jenkinsfile 中的编码不正确等等。
{{</ notice >}}
{{</ notice >}}
3. 请稍等片刻,您会看到一些活动停止而某些失败。 单击第一个以查看详细信息。
4. 流水线在 `deploy to dev` 阶段暂停,您需要手动点击**继续**。请注意,在 Jenkinsfile 中分别定义了三个阶段 `deploy to dev`、`push with tag` 和 `deploy to production`,因此将对流水线进行三次审核
![activity-failure](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/activity-failure.png)
![流水线继续](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.PNG)
{{< notice note >}}
队列失败可能是由不同的因素引起的。 在此示例中,在上述步骤中编辑分支环境变量时,仅更改了 sonarqube 分支的 Jenkinsfile。 相反,依赖项和 master 分支中的这些变量保持不变(即,错误的 GitHub 和 Docker Hub 帐户),从而导致失败。 您可以单击它并检查其日志以查看详细信息。 失败的其他原因可能是网络问题、Jenkinsfile 中的编码不正确等等。
{{</ notice >}}
4. 流水线在 `deploy to dev` 阶段暂停,您需要手动单击**继续**。请注意,在 Jenkinsfile 中分别定义了三个阶段 `deploy to dev`、`push with tag` 和 `deploy to production` ,因此将对流水线进行三次审核。
![pipeline-proceed](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/pipeline-proceed.png)
在实际开发或生产场景中,可能需要具有更高权限的管理员(例如版本管理员)来审核流水线、镜像以及代码分析结果, 他们有权决定流水线是否能进入下一阶段。在 Jenkinsfile 中, `input` 步骤可以指定用户审核流水线。如果您想指定一个用户(例如 `project-admin`) 来审核,您可以在 Jenkinsfile 的 input 函数中添加一个字段。如果是多个用户则通过逗号分隔,如下所示:
在开发或生产环境中,可能需要具有更高权限的人员(例如版本管理员)来审核流水线、镜像以及代码分析结果。他们有权决定流水线是否能进入下一阶段。在 Jenkinsfile 中,您可以使用 `input` 来指定由谁审核流水线。如果您想指定一个用户(例如 `project-admin`)来审核,您可以在 Jenkinsfile 中添加一个字段。如果有多个用户,则需要通过逗号进行分隔,如下所示:
```groovy
···
@ -232,83 +234,83 @@ Jenkinsfile 是一个文本文件,它包含了 Jenkins 流水线的定义并
···
```
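例如,一个带审核人的 `deploy to dev` 阶段可以写成下面这样(仅为示意,审核人帐户名为假设值,具体请以仓库中的 `Jenkinsfile-online` 为准):

```groovy
stage ('deploy to dev') {
    steps {
        // 等待下列帐户中任意一位审核通过后,流水线才会继续
        input(id: 'deploy-to-dev', message: 'deploy to dev?', submitter: 'project-admin,project-admin1')
        kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
    }
}
```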
### 步骤 6: 检查流水线状态
### 步骤 6检查流水线状态
1. 在**运行状态**中,您可以查看流水线的运行方式。 请注意,流水线在刚创建后将继续初始化几分钟。 示例流水线有八个阶段,它们已在 [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online) 中单独定义。
1. 在**运行状态**中,您可以查看流水线的运行状态。请注意,流水线在刚创建后将继续初始化几分钟。示例流水线有八个阶段,它们已在 [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online) 中单独定义。
![inspect-pipeline-log-1](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/inspect-pipeline-log-1.png)
![查看流水线日志-1](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.PNG)
2. 通过单击右上角的**查看日志**来检查流水线运行日志。 您可以看到流水线的动态日志输出,包括任何可能导致流水线无法运行的错误。对于每个阶段,您都可以单击它检查日志,而且可以将其下载到本地计算机以进行进一步分析。
2. 点击右上角的**查看日志**来查看流水线运行日志。您可以看到流水线的动态日志输出,包括可能导致流水线无法运行的错误。对于每个阶段,您都可以点击该阶段来查看其日志,而且可以将日志下载到本地计算机进行进一步分析。
![inspect-pipeline-log-2](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/inspect-pipeline-log-2.png)
![查看流水线日志-2](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.PNG)
### 步骤 7: 验证结果
### 步骤 7验证结果
1. 成功完成流水线后,单击**代码质量**通过 SonarQube 检查结果,如下所示。
1. 流水线成功运行后,点击**代码质量**通过 SonarQube 查看结果,如下所示。
![sonarqube-result-detail-1.png](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/sonarqube-result-detail-1.png)
![SonarQube 结果详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail-1.PNG)
![sonarqube-result-detail](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/sonarqube-result-detail.png)
![SonarQube 结果详情](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sonarqube-result-detail.PNG)
2. 正如在 Jenkinsfile 中定义的那样,通过流水线构建的 Docker 镜像也已成功推送到 Docker Hub。 在 Docker Hub 中,您会找到带有标签 v0.0.2 的镜像,该镜像是在流水线运行之前指定的
2. 按照 Jenkinsfile 中的定义,通过流水线构建的 Docker 镜像也已成功推送到 Docker Hub。在 Docker Hub 中,您会看到带有标签 `v0.0.2` 的镜像,该标签在流水线运行之前已指定
![docker-hub-result](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/docker-hub-result.png)
![Docker Hub 镜像](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/docker-hub-result.PNG)
3. 同时,GitHub 中生成一个新标签和一个新版本。
3. 同时GitHub 中生成一个新标签和一个新发布版本。
![github-result](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/github-result.png)
![GitHub 结果](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/github-result.PNG)
4. 示例应用程序将部署到 `kubesphere-sample-dev``kubesphere-sample-prod`,并创建相应的 Deployments 和 Services。 转到这两个项目,这是预期的结果
4. 示例应用程序将部署到 `kubesphere-sample-dev``kubesphere-sample-prod`,并创建相应的部署和服务。转到这两个项目,预期结果如下所示
| 环境 | URL | Namespace | Deployment | Service |
| 环境 | URL | 命名空间 | 部署 | 服务 |
| :--- | :--- | :--- | :--- | :--- |
| Development | `http://{$NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev |
| Development | `http://{NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev |
| Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample |
#### Deployments
#### 部署
![pipeline-deployments](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/pipeline-deployments.png)
![流水线部署](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/pipeline-deployments.PNG)
#### Services
#### 服务
![devops-prod](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/devops-prod.png)
![流水线服务](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/devops-prod.PNG)
{{< notice note >}}
您可能需要打开安全组中的端口,以便通过 URL 访问应用程序。
您可能需要在您的安全组中放行该端口,以便通过 URL 访问应用程序。
{{</ notice >}}
{{</ notice >}}
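您也可以使用 kubectl 快速确认上表中的部署和服务已经创建(以下命令仅为示例,命名空间取自上表):

```bash
kubectl -n kubesphere-sample-dev get deployments,services
kubectl -n kubesphere-sample-prod get deployments,services
```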
### 步骤 8: 访问示例服务
### 步骤 8访问示例服务
1. 请以管理员(`admin`)身份登录 KubeSphere 并使用**工具箱**Toolbox中的 **web kubectl** 访问服务。转到项目 `kubesphere-sample-dev`,然后在**应用负载**下的**服务**中选择 `ks-sample-dev` Endpoint 可用于访问服务。
1. `admin` 身份登录 KubeSphere 并使用**工具箱**中的 **Web Kubectl** 访问该服务。转到 `kubesphere-sample-dev` 项目,然后在**应用负载**下的**服务**中选择 `ks-sample-dev`。Endpoint 可用于访问服务。
![sample-app-result-check](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/sample-app-result-check.png)
![查看示例应用](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/sample-app-result-check.PNG)
![访问 Endpoint](/images/docs/zh-cn/devops-user-guide/use-devops/create-a-pipeline-using-a-jenkinsfile/access-endpoint.PNG)
![access-endpoint](/images/docs/devops-user-guide-zh/using-devops-zh/create-a-pipeline-using-a-jenkinsfile-zh/access-endpoint.png)
2. 从右下角的**工具箱Toolbox** 中使用 **web kubectl** 执行以下命令:
2. 在右下角的**工具箱**中使用 **Web Kubectl** 执行以下命令:
```bash
$curl 10.244.0.213:8000
$ curl 10.233.109.33:8080
```
3. 预期输出:
3. 预期输出:
```bash
Really appreciate your star, that's the power of our life.
```
{{< notice note >}}
{{< notice note >}}
使用 `curl` 访问 endpoints 或者 {$Virtual IP}:{$Port} 再或者 {$Node IP}:{$NodePort}
使用 `curl` 访问 Endpoint或者访问 {$Virtual IP}:{$Port} 或 {$Node IP}:{$NodePort}。
{{</ notice >}}
{{</ notice >}}
4. 同样,您可以在项目 `kubesphere-sample-prod` 中测试服务,您将看到相同的结果。
4. 同样,您可以在项目 `kubesphere-sample-prod` 中测试服务,您将看到相同的输出结果。
```bash
$ curl 10.244.0.213:8000
$ curl 10.233.109.2:8080
Really appreciate your star, that's the power of our life.
```


@ -1,83 +1,87 @@
---
title: "凭证管理"
keywords: 'Kubernetes, docker, credential, KubeSphere, devops'
description: '本教程演示了如何在 DevOps 项目中管理凭证。'
keywords: 'Kubernetes, Docker, 凭证, KubeSphere, DevOps'
description: '本教程演示如何在 DevOps 工程中管理凭证。'
linkTitle: "凭证管理"
weight: 11230
---
凭证是包含了敏感数据的对象例如用户名密码、SSH 密钥和一些 Token 等。 当 KubeSphere 流水线运行时会与很多外部环境交互如拉取代码push/pull 镜像SSH 连接至相关环境中执行脚本等,此过程中需提供一系列凭证,而这些凭证不应明文出现在流水线中。</br>
具有必要权限的 DevOps 项目用户可以为 Jenkins 管道配置凭证。一旦用户在 DevOps 项目中添加或配置了这些凭证,就可以在 DevOps 项目中使用它们与第三方应用程序进行交互。</br>
目前,您可以在 DevOps 项目中存储以下4种类型的凭证
凭证是包含敏感信息的对象例如用户名和密码、SSH 密钥和令牌 (Token)。当 KubeSphere DevOps 流水线运行时,会与外部环境中的对象进行交互,以执行一系列任务,包括拉取代码、推送和拉取镜像以及运行脚本等。此过程中需要提供相应的凭证,而这些凭证不会明文出现在流水线中。
![create-credential-page](/images/docs/devops-user-guide-zh/credential-management-zh/create-credential-page.png)
具有必要权限的 DevOps 工程用户可以为 Jenkins 流水线配置凭证。用户在 DevOps 工程中添加或配置这些凭证后,便可以在 DevOps 工程中使用这些凭证与第三方应用程序进行交互。
- **账户凭证**: 可以作为单独的组件或以 `usernamepassword` 格式用冒号分隔的字符串处理的用户名和密码,例如 GitHubGitLab 和 Docker Hub 的帐户。
- **SSH**: 带有私钥的用户名SSH 公/私钥对。
- **秘密文本**: 密钥存放于文本文件中。
- **kubeconfig**: 常用于配置跨集群认证, 如果选择此类型,将自动获取当前 Kubernetes 集群的 kubeconfig 文件内容,并自动填充在当前页面对话框中。
目前,您可以在 DevOps 工程中存储以下 4 种类型的凭证:
本教程演示了如何在 DevOps 项目中创建和管理凭证。
![创建凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-page.png)
## 前提条件
- **帐户凭证**:用户名和密码,可以作为单独的组件处理,或者作为用冒号分隔的字符串(格式为 `username:password`)处理,例如 GitHub、GitLab 和 Docker Hub 的帐户。
- **SSH**带有私钥的用户名SSH 公钥/私钥对。
- **秘密文本**:文件中的秘密内容。
- **kubeconfig**:用于配置跨集群认证。如果选择此类型,将自动获取当前 Kubernetes 集群的 kubeconfig 文件内容,并自动填充在当前页面对话框中。
本教程演示如何在 DevOps 工程中创建和管理凭证。有关如何使用凭证的更多信息,请参见[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)和[使用图形编辑面板创建流水线](../create-a-pipeline-using-graphical-editing-panel/)。
## 准备工作
- 您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您有一个企业空间,一个 DevOps 项目和一个被邀请具有 DevOps 项目操作员角色的普通帐户(`企业空间普通成员`)。 如果尚未准备好,请参阅创[建企业空间,项目,帐户和角色](../../../quick-start/create-workspace-and-project/)。
- 您需要有一个企业空间、一个 DevOps 工程和一个帐户 (`project-regular`),并已邀请此帐户至 DevOps 工程中且授予 `operator` 角色。如果尚未准备好,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
## 创建凭证
`企业空间普通成员`身份登录 KubeSphere 控制台。 导航到您的 DevOps 项目,选择**凭证**,然后单击**创建**。
`project-regular` 身份登录 KubeSphere 控制台。进入您的 DevOps 工程,选择**凭证**,然后点击**创建**。
![create-credential-step1](/images/docs/devops-user-guide-zh/credential-management-zh/create-credential-step1.png)
![点击创建](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/create-credential-step1.PNG)
### 创建 Docker Hub 凭证
1. 在出现的对话框中,提供以下信息。
1. 在弹出对话框中输入以下信息。
![dockerhub-credentials](/images/docs/devops-user-guide-zh/credential-management-zh/dockerhub-credentials.png)
![DockerHub 凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/dockerhub-credentials.PNG)
- **凭证 ID**:设置可以在流水线中使用的 ID例如 `dockerhub-id`
- **类型** 选择 **账户凭证**
- **用户名**:您的 Docker Hub 帐户(即 Docker ID
- **token/密码**:您的 Docker Hub 密码。
- **描述信息**:凭证的简介。
2. 完成后,单击**确定**。
- **凭证 ID**:设置可以在流水线中使用的 ID例如 `dockerhub-id`
- **类型**:选择**帐户凭证**。
- **用户名**:您的 Docker Hub 帐户(即 Docker ID
- **token / 密码**:您的 Docker Hub 密码。
- **描述信息**:凭证的简介。
2. 完成操作后点击**确定**。
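在填写之前,您可以先在本地确认 Docker Hub 的用户名和密码可用(示例命令,假设本机已安装 Docker

```bash
# 按提示输入 Docker Hub 密码,验证帐户是否可以正常登录
docker login -u <您的 Docker ID>
```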
### 创建 GitHub 凭证
同样,按照上述相同步骤创建 GitHub 凭证。 设置一个**凭证 ID**(例如 github-id然后**类型**选择**账户凭证**。 分别输入 GitHub 用户名和密码作为**用户名**和**token/密码**
同样,按照上述相同步骤创建 GitHub 凭证。设置不同的**凭证 ID**(例如 `github-id`**类型**同样选择**帐户凭证**。分别在**用户名**和 **token / 密码**中输入您的 GitHub 用户名和密码
{{< notice note >}}
如果您的帐户或密码中包含任何特殊字符,例如 `@``$`它们可能会在流水线运行时导致错误,因为它们可能无法识别。 在这种情况下,您需要先在某些第三方网站(例如 [urlencoder](https://www.urlencoder.org/) )上对帐户或密码进行编码。 之后,复制并粘贴输出以获取您的凭证信息。
如果您的帐户或密码中包含任何特殊字符,例如 `@``$`可能会因为无法识别而在流水线运行时导致错误。在这种情况下,您需要先在一些第三方网站(例如 [urlencoder](https://www.urlencoder.org/))上对帐户或密码进行编码,然后将输出结果复制粘贴作为您的凭证信息。
{{</ notice >}}
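除了使用第三方网站,您也可以在本地完成编码,例如(以下密码仅为假设的示例值):

```bash
# 对包含特殊字符的密码进行 URL 编码
python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=''))" 'p@ss$word'
# 输出p%40ss%24word
```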
### 创建 Kubeconfig 凭证
### 创建 kubeconfig 凭证
同样,按照上述相同步骤创建 kubeconfig 凭证。 设置其凭证 ID例如`demo-kubeconfig`)并选择 **kubeconfig**
同样,按照上述相同步骤创建 kubeconfig 凭证。设置不同的凭证 ID例如 `demo-kubeconfig`)并选择 **kubeconfig**
{{< notice info >}}
用于配置对群集的访问的文件称为 kubeconfig 文件。 这是引用配置文件的通用方法。 有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/)。 您可以创建 kubeconfig 凭证来访问当前 Kubernetes 集群,该凭证将在流水线中使用,您不需要更改文件,因为 KubeSphere 会自动使用当前 Kubernetes 集群的 kubeconfig 填充该字段。访问其他集群时,可能需要更改 kubeconfig。
用于配置集群访问的文件称为 kubeconfig 文件。这是引用配置文件的通用方法。有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/configuration/organize-cluster-access-kubeconfig/)。您可以创建 kubeconfig 凭证来访问当前 Kubernetes 集群,该凭证将在流水线中使用。您不需要更改该文件,因为 KubeSphere 会自动使用当前 Kubernetes 集群的 kubeconfig 填充该字段。访问其他集群时,可能需要更改 kubeconfig。
{{</ notice >}}
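如果想了解将被自动填充的 kubeconfig 内容格式,可以先在本地查看当前集群的 kubeconfig示例命令实际填充内容以控制台自动获取的为准

```bash
kubectl config view --minify --flatten
```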
## 查看和管理凭证
1. 创建的凭据显示在列表中,如下所示。
1. 凭证创建后,会在列表中显示,如下所示。
![credentials-list](/images/docs/devops-user-guide-zh/credential-management-zh/credentials-list.png)
![凭证列表](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-list.PNG)
2. 单击任何一个都可以转到其详细信息页面,您可以在其中查看帐户详细信息以及与凭据有关的所有事件。
2. 点击任意一个凭证,进入其详情页面,您可以查看帐户详情和与此凭证相关的所有事件。
![credential-detail-page](/images/docs/devops-user-guide-zh/credential-management-zh/credential-detail-page.png)
![凭证详情页面](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/credential-detail-page.PNG)
3. 您也可以在此页面上编辑或删除凭据。 请注意在编辑凭据时KubeSphere 不会显示现有的用户名或密码信息。 如果输入新的用户名和密码,则前一个将被覆盖。
3. 您也可以在此页面上编辑或删除凭证。请注意编辑凭证时KubeSphere 不会显示现有用户名或密码信息。如果输入新的用户名和密码,则前一个将被覆盖。
![edit-credentials](/images/docs/devops-user-guide-zh/credential-management-zh/edit-credentials.png)
![编辑凭证](/images/docs/zh-cn/devops-user-guide/use-devops/credential-management/edit-credentials.PNG)
有关如何使用凭据的更多信息,请参见[使用 Jenkinsfile 创建管道](../create-a-pipeline-using-jenkinsfile/)和[使用图形编辑面板创建管道](../create-a-pipeline-using-graphical-editing-panel)。
## 另请参见
[使用 Jenkinsfile 创建流水线](../create-a-pipeline-using-jenkinsfile/)
[使用图形编辑面板创建流水线](../create-a-pipeline-using-graphical-editing-panel)


@ -1,45 +1,46 @@
---
title: "为 KubeSphere 流水线设置电子邮件服务器"
keywords: 'KubeSphere, Kubernetes, notification, jenkins, devops, ci/cd, pipeline, email server'
keywords: 'KubeSphere, Kubernetes, 通知, Jenkins, DevOps, CI/CD, 流水线, 电子邮件服务器'
description: '为 KubeSphere CI/CD 流水线设置电子邮件服务器'
linkTitle: "为 KubeSphere 流水线设置电子邮件服务器"
Weight: 11260
---
内置的 Jenkins 无法与平台通知系统共享相同的电子邮件配置。 因此,您需要单独为 KubeSphere DevOps 流水线配置电子邮件服务器。
## 前提条件
内置 Jenkins 无法与平台通知系统共享相同的电子邮件配置。因此,您需要单独为 KubeSphere DevOps 流水线配置电子邮件服务器设置。
## 准备工作
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要一个被授予**集群管理**角色的帐户。 例如,您可以直接以 `admin` 身份登录控制台或使用授权创建新角色并将其分配给帐户。
- 您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台或者创建具有该授权的新角色并将该角色分配给一个帐户。
## 设置电子邮件服务器
1. 单击左上角的**平台管理**,然后选择**集群管理**。
1. 点击左上角的**平台管理**,然后选择**集群管理**。
![clusters-management](/images/docs/devops-user-guide-zh/jenkins-email-zh/clusters-management.png)
![集群管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/clusters-management.png)
2. 如果您已经在导入成员集群时启用了[多集群特性](../../../multicluster-management),那么您可以选择一个特定集群以查看其应用程序资源。 如果尚未启用该特性,请直接参考下一步。
2. 如果您已经启用[多集群功能](../../../multicluster-management)并已导入 Member 集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。
3. 转到**应用负载**下的**工作负载**,然后从下拉列表中选择项目 **kubesphere-devops-system**。 单击 **ks-jenkins** 右侧的三个点以编辑其 YAML 配置文件。
3. 转到**应用负载**下的**工作负载**,然后从下拉列表中选择 **kubesphere-devops-system** 项目。点击 **ks-jenkins** 右侧的三个点以编辑其 YAML 配置文件。
![workloads-list](/images/docs/devops-user-guide-zh/jenkins-email-zh/workloads-list.png)
![工作负载列表](/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/workloads-list.PNG)
4. 向下滚动到图像下方需要配置的字段, 完成配置后,单击**更新**
4. 向下滚动到下图所示的需要指定的字段。完成修改后,点击**更新**以保存
{{< notice warning >}}
{{< notice warning >}}
`ks-jenkins` 部署Deployment中修改电子邮件服务器后它将重新启动。 因此DevOps 系统将在几分钟内不可用, 请在适当的时候进行此类修改。
`ks-jenkins` 部署 (Deployment) 中修改电子邮件服务器后它会重新启动。因此DevOps 系统将在几分钟内不可用,请在适当的时候进行此类修改。
{{</ notice >}}
{{</ notice >}}
![set-jenkins-email-3](/images/docs/devops-user-guide-zh/jenkins-email-zh/set-jenkins-email.png)
![设置电子邮件](/images/docs/zh-cn/devops-user-guide/use-devops/set-email-server-for-kubesphere-pipelines/set-jenkins-email.png)
| 环境变量名称 | 描述 |
|---|---|
|EMAIL\_SMTP\_HOST | SMTP 服务器名称 |
|EMAIL\_SMTP\_PORT | SMTP 服务端口 (如25) |
|EMAIL\_FROM\_ADDR | 电子邮件发件人地址 |
|EMAIL\_FROM\_NAME | 电子邮件发件人姓名 |
|EMAIL\_FROM\_PASS | 电子邮件发件人密码 |
|EMAIL\_USE\_SSL | 是否启用 SSL 配置 |
| 环境变量名称 | 描述信息 |
| ----------------- | ------------------------- |
| EMAIL\_SMTP\_HOST | SMTP 服务器地址 |
| EMAIL\_SMTP\_PORT | SMTP 服务器端口25 |
| EMAIL\_FROM\_ADDR | 电子邮件发件人地址 |
| EMAIL\_FROM\_NAME | 电子邮件发件人姓名 |
| EMAIL\_FROM\_PASS | 电子邮件发件人密码 |
| EMAIL\_USE\_SSL | 是否启用 SSL 配置 |
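如果您更习惯命令行,也可以直接为 `ks-jenkins` 部署设置上述环境变量,并观察其重启进度(以下取值仅为示例,请替换为您的实际邮件服务器信息):

```bash
kubectl -n kubesphere-devops-system set env deployment/ks-jenkins \
  EMAIL_SMTP_HOST=mail.example.com \
  EMAIL_SMTP_PORT=25 \
  EMAIL_FROM_ADDR=admin@example.com \
  EMAIL_FROM_NAME=KubeSphere \
  EMAIL_FROM_PASS='<发件人密码>' \
  EMAIL_USE_SSL=false

# 等待 ks-jenkins 重新启动完成
kubectl -n kubesphere-devops-system rollout status deployment/ks-jenkins
```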


@ -1,44 +1,46 @@
---
title: "Jenkins 系统设置"
keywords: 'Kubernetes, KubeSphere, Jenkins, CasC'
description: '如何在 KubeSphere 中设置 Jenkins.'
description: '如何在 KubeSphere 中设置 Jenkins'
linkTitle: 'Jenkins 系统设置'
Weight: 11240
---
Jenkins 强大而灵活,已经成为 CI/CD 工作流事实上的标准。 但是,许多插件要求用户先设置系统级配置,然后才能使用。
KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能。为了为用户提供可调度的 Jenkins 环境KubeSphere 使用 **Configuration-as-Code** 进行 Jenkins 系统设置,这要求用户登录 Jenkins 仪表板并在修改后重新加载配置。在当前版本中Jenkins 系统设置在 KubeSphere 控制台上不可用,即将发布的版本将支持该设置。
本教程演示了如何在 Jenkins 仪表板上设置 Jenkins 并重新加载配置。
Jenkins 强大而灵活,已经成为 CI/CD 工作流的事实标准。但是,许多插件要求用户先设置系统级配置,然后才能使用。
## 先决条件
KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能。为了向用户提供可调度的 Jenkins 环境KubeSphere 使用 **Configuration-as-Code** 进行 Jenkins 系统设置,这要求用户登录 Jenkins 仪表板并在修改配置后重新加载。Jenkins 系统设置在 KubeSphere 当前版本的控制台上不可用,即将发布的版本将支持该设置。
本教程演示如何在 Jenkins 仪表板上设置 Jenkins 并重新加载配置。
## 准备工作
您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
## 修改 ConfigMap
建议您通过Configuration-as-CodeCasC在 KubeSphere 中配置 Jenkins。 内置的 Jenkins CasC 文件存储为 [ConfigMap](../../../project-user-guide/configuration/configmaps/)。
建议您通过 Configuration-as-Code (CasC) 在 KubeSphere 中配置 Jenkins。内置 Jenkins CasC 文件存储为 [ConfigMap](../../../project-user-guide/configuration/configmaps/)。
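在进入控制台操作之前,您也可以先通过 kubectl 查看该 ConfigMap 的当前内容(示例命令):

```bash
kubectl -n kubesphere-devops-system get configmap jenkins-casc-config -o yaml
```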
1. 以 kubeSphere 管理员(`admin`)身份登录, 单击左上角的**平台管理**,然后选择**集群管理**。
1. 以 `admin` 身份登录 KubeSphere点击左上角的**平台管理**,然后选择**集群管理**。
![cluster-management](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/cluster-management.png)
![集群管理](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/cluster-management.png)
2. 如果您已经在导入成员集群时启用了[多集群特性](../../../multicluster-management),那么您可以选择一个特定集群以查看其应用程序资源。 如果尚未启用该特性,请直接参考下一步。
2. 如果您已经启用[多集群功能](../../../multicluster-management)并已导入 Member 集群,您可以选择一个特定集群来编辑 ConfigMap。如果您尚未启用多集群功能,请直接参考下一步。
3. 从导航栏中,在**配置中心**下选择**配置**。 在**配置**页面上,从下拉列表中选择 `kubesphere-devops-system`,然后单击 `jenkins-casc-config`。
3. 在左侧导航栏中选择**配置中心**下的**配置**。在**配置**页面上,从下拉列表中选择 `kubesphere-devops-system`,然后点击 `jenkins-casc-config`。
![edit-configmap](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/edit-configmap.png)
![编辑 ConfigMap](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-configmap.png)
4. 在详细信息页面上,从**更多操作**下拉列表中单击**编辑配置文件YAML 文件)**。
4. 在详情页面上,点击**更多操作**,在下拉列表中选择**编辑配置文件**。
![more-list](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/more-list.png)
![more-list](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/more-list.png)
5. 如下所示,`jenkins-casc-config` 的配置模板是一个 YAML 文件。 您可以在 ConfigMap 的代理Kubernetes Jenkins agent中修改容器镜像、标签等内容或者在 podTemplate 中添加容器。 完成后,单击**更新**。
5. `jenkins-casc-config` 的配置模板是一个 YAML 文件,如下图所示。您可以在 ConfigMap 的代理 (Kubernetes Jenkins Agent) 中修改容器镜像、标签等内容,或者在 podTemplate 中添加容器。完成操作后,点击**更新**。
![edit-jenkins](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/edit-jenkins.png)
![编辑 Jenkins](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/edit-jenkins.png)
## 登录 Jenkins 重新加载配置
修改 `jenkins-casc-config` 后,需要在 Jenkins 仪表板**Configuration as Code** 页面上重新加载更新的系统配置。 这是因为直接通过 Jenkins 仪表板配置的系统设置可能在 Jenkins 重新调度之后被 CasC`Configuration as Code` 配置覆盖。
修改 `jenkins-casc-config` 后,需要在 Jenkins 仪表板的 **Configuration as Code** 页面上重新加载更新的系统配置。这是因为直接通过 Jenkins 仪表板配置的系统设置可能在 Jenkins 重新调度之后被 CasC 配置覆盖。
1. 执行以下命令获取 Jenkins 的地址。
@ -48,38 +50,38 @@ KubeSphere DevOps 系统提供基于 Jenkins 的容器化 CI/CD 功能。为了
echo http://$NODE_IP:$NODE_PORT
```
2. 您可以看到如下所示的预期输出,它告诉您 Jenkins 的 IP 地址和端口号。
2. 您可以看到如下所示的预期输出,获取 Jenkins 的 IP 地址和端口号。
```bash
http://10.77.1.201:30180
```
3. 使用地址 `http://Node IP:Port Number` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。 Jenkins 配置了 KubeSphere LDAP这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。
3. 使用地址 `http://Node IP:Port Number` 访问 Jenkins。安装 KubeSphere 时,默认情况下也会安装 Jenkins 仪表板。Jenkins 还配置有 KubeSphere LDAP这意味着您可以直接使用 KubeSphere 帐户(例如 `admin/P@88w0rd`)登录 Jenkins。
![jenkins-dashboard](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/jenkins-dashboard.png)
![Jenkins 仪表板](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/jenkins-dashboard.png)
{{< notice note >}}
您可能需要设置必要的端口转发规则并打开端口 `30180` 才能访问安全组中的 Jenkins具体取决于您的实例部署的位置
取决于您的实例的部署位置,您可能需要设置必要的端口转发规则并在您的安全组中放行端口 `30180`,以便访问 Jenkins
{{</ notice >}}
{{</ notice >}}
4. 登录仪表板后,从导航栏中单击 **Manage Jenkins**
4. 登录仪表板后,点击导航栏中的 **Manage Jenkins**
![manage-jenkins](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/manage-jenkins.png)
![manage-jenkins](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/manage-jenkins.png)
5. 向下翻页并单击 **Configuration as Code**。
5. 向下翻页并点击 **Configuration as Code**。
![configuration-as-code](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/configuration-as-code.png)
![configuration-as-code](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/configuration-as-code.png)
6. 要重新加载在 ConfigMap 中修改的配置,请单击 **Apply new configuration**。.
6. 要重新加载 ConfigMap 中已修改的配置,请点击 **Apply new configuration**
![app-config](/images/docs/devops-user-guide-zh/using-devops-zh/jenkins-system-settings-zh/app-config.png)
![应用配置](/images/docs/zh-cn/devops-user-guide/use-devops/jenkins-system-settings/app-config.png)
7. 有关如何通过 CasC 设置 Jenkins 的更多信息,请参阅 [Jenkins 文档](https://github.com/jenkinsci/configuration-as-code-plugin)。
7. 有关如何通过 CasC 设置 Jenkins 的更多信息,请参见 [Jenkins 文档](https://github.com/jenkinsci/configuration-as-code-plugin)。
{{< notice note >}}
在当前版本中,并非所有插件都支持 CasC 设置。 CasC 仅覆盖通过 CasC 设置的插件配置。
在当前版本中,并非所有插件都支持 CasC 设置。CasC 仅覆盖通过 CasC 设置的插件配置。
{{</ notice >}}
{{</ notice >}}


@ -5,3 +5,5 @@ description: ''
linkTitle: "Pipeline Settings"
weight: 11280
---
TBD.


@ -1,62 +1,63 @@
---
title: "为缓存依赖项设置 CI 节点"
keywords: 'Kubernetes, docker, KubeSphere, Jenkins, cicd, pipeline, dependency cache'
description: '如何为 KubeSphere 流水线的缓存依赖项设置 CI 节点'
linkTitle: "为缓存依赖项设置 CI 节点"
title: "为依赖项缓存设置 CI 节点"
keywords: 'Kubernetes, Docker, KubeSphere, Jenkins, CICD, 流水线, 依赖项缓存'
description: '如何为 KubeSphere 流水线依赖项缓存设置 CI 节点'
linkTitle: "为依赖项缓存设置 CI 节点"
weight: 11270
---
通常,在构建应用程序时需要提取不同的依赖关系。 这可能会导致某些问题,例如较长的拉取时间和网络的不稳定会进一步导致构建失败。 为了为您的流水线提供更可靠和稳定的环境您可以配置一个或一组专门用于持续集成CI的节点。 这些 CI 节点可以通过使用缓存来加快构建过程。</br>
本教程演示如何设置 CI 节点,以便 KubeSphere 调度流水线的任务,并在这些节点上构建 S2I / B2I。
通常情况下,构建应用程序的过程中需要拉取不同的依赖项。这可能会导致某些问题,例如拉取时间长和网络不稳定,这会进一步导致构建失败。要为您的流水线提供更可靠和稳定的环境,您可以配置一个节点或一组节点,专门用于持续集成 (CI)。这些 CI 节点可以通过使用缓存来加快构建过程。
## 前提条件
本教程演示如何设置 CI 节点,以便 KubeSphere 将流水线的任务以及 S2I/B2I 构建的任务调度到这些节点。
您需要一个被授予**集群管理**角色的帐户。 例如,您可以直接以 `admin` 身份登录控制台或使用授权创建新角色并将其分配给帐户。
## 准备工作
您需要一个具有**集群管理**权限的帐户。例如,您可以直接以 `admin` 身份登录控制台,或者创建一个具有该权限的新角色并将其分配给一个帐户。
## 标记 CI 节点
1. 单击左上角的**平台管理**,然后选择**集群管理**。
1. 点击左上角的**平台管理**,然后选择**集群管理**。
![clusters-management](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/clusters-management.png)
![集群管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/clusters-management.png)
2. 如果您已经在导入成员集群时启用了[多集群特性](../../../multicluster-management),那么您可以选择一个特定集群以查看其应用程序资源。 如果尚未启用该特性,请直接参考下一步。
2. 如果您已经启用[多集群功能](../../../multicluster-management)并已导入 Member 集群,那么您可以选择一个特定集群以查看其节点。如果尚未启用该功能,请直接参考下一步。
3. 导航到**节点管理**下的**群集节点**,您可以在其中查看当前集群中的现有节点。
3. 转到**节点管理**下的**集群节点**,您可以在其中查看当前集群中的现有节点。
![Node Management](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-1.png)
![节点管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/node-management.png)
4. 从列表中选择一个节点以运行 CI 任务。 例如,在此处选择 `node2`,然后单击它以转到其详细信息页面。 单击**更多操作**,然后选择**编辑标签**。
4. 从列表中选择一个节点用来运行 CI 任务。例如,在此处选择 `node02`,点击它以转到其详情页面。点击**更多操作**,然后选择**编辑标签**。
![Select CI Node](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-2.png)
![选择 CI 节点](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-ci-node.png)
5. 在出现的对话框中,单击**添加标签**。 使用键 `node-role.kubernetes.io/worker` 和值 `ci` 添加新标签,然后单击**保存**。
5. 在弹出对话框中,点击**添加标签**。使用键 `node-role.kubernetes.io/worker` 和值 `ci` 添加新标签,然后点击**保存**。
![Add CI Label](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-3.png)
![添加 CI 标签](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-ci-label.png)
{{< notice note >}}
{{< notice note >}}
节点可能已经有空值的键,这种情况下您可以直接补充值 `ci`
节点可能已经有空值的键,这种情况下您可以直接补充值 `ci`
{{</ notice >}}
{{</ notice >}}
## 给 CI 节点添加污点
流水线和 S2I/B2I 工作流基本上是根据[节点亲和性](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)调度到该节点。 如果要将节点专用于 CI 任务,这意味着不允许为其安排其他工作负载,则可以在该节点上添加[污点](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)。
流水线和 S2I/B2I 工作流基本上会根据[节点亲和性](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)调度到该节点。如果要将节点专用于 CI 任务,即不允许将其他工作负载调度到该节点,您可以在该节点上添加[污点](https://kubernetes.io/zh/docs/concepts/scheduling-eviction/taint-and-toleration/)。
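上一节的标签操作和本节的污点操作也都可以直接使用 kubectl 完成(以下命令仅为示例,假设节点名为 `node02`

```bash
# 为节点添加 CI 标签
kubectl label node node02 node-role.kubernetes.io/worker=ci --overwrite

# 为节点添加污点,尽量不将其他工作负载调度到该节点
kubectl taint node node02 node.kubernetes.io/ci=:PreferNoSchedule
```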
1. 单击**更多操作**,然后选择**污染管理**。
1. 点击**更多操作**,然后选择**污点管理**。
![Select CI Node](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-4.png)
![选择污点管理](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/select-taint-management.png)
2. 单击**添加污点**,然后输入键 `node.kubernetes.io/ci` 而不指定值。 您可以根据需要选择 `不允许调度 (NoSchedule)` 或 `尽量不调度 (PreferNoSchedule)`。
2. 点击**添加污点**,然后输入键 `node.kubernetes.io/ci` 而不指定值。您可以根据需要选择 `不允许调度 (NoSchedule)` 或 `尽量不调度 (PreferNoSchedule)`。
![Add Taint](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-5.png)
![添加污点](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/add-taint.png)
3. 单击**保存**。 KubeSphere 将根据您设置的污点安排任务。 您现在可以回到DevOps工作流上工作。
3. 点击**保存**。KubeSphere 将根据您设置的污点调度任务。您现在可以回到 DevOps 流水线上进行操作。
![Taint Result](/images/docs/devops-user-guide-zh/set-ci-node-for-dependency-cache-zh/set-node-6.png)
![污点已添加](/images/docs/zh-cn/devops-user-guide/use-devops/set-ci-node-for-dependency-caching/taint-result.png)
{{< notice tip >}}
{{< notice tip >}}
本教程还介绍了与节点管理有关的操作。 有关详细信息,请参阅[节点管理](../../../cluster-administration/nodes/)。
本教程还涉及与节点管理有关的操作。有关详细信息,请参见[节点管理](../../../cluster-administration/nodes/)。
{{</ notice >}}
{{</ notice >}}


@ -1,60 +1,59 @@
---
title: "DevOps 工程管理"
keywords: 'kubernetes, docker, helm, jenkins, istio, prometheus'
description: '本教程演示了如何创建和管理 DevOps 工程'
linkTitle: "DevOps 工程管理 "
keywords: 'Kubernetes, KubeSphere, DevOps, Jenkins'
description: '如何创建和管理 DevOps 工程'
linkTitle: "DevOps 工程管理"
weight: 11120
---
本教程演示如何创建和管理 DevOps 工程。
本教程演示如何创建和管理 DevOps 工程。
## 先决条件
## 准备工作
- 您需要创建一个企业空间和一个具有项目管理 (`project-admin`) 权限的帐户,该账户必须是被赋予企业空间普通用户角色。想要查询更多的信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
- 您需要创建一个企业空间和一个帐户 (`project-admin`),必须邀请该帐户至该企业空间并赋予 `workspace-self-provisioner` 角色。有关更多信息,请参考[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project/)。
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
## 创建 DevOps 工程
## 创建一个 DevOps 工程
1. 以 `project-admin` 身份登录 KubeSphere 控制台,转到 **DevOps 工程**,然后点击**创建**。
1. 用项目管理员身份登录。 转到 **DevOps 工程**,然后单击**创建**。
![创建 DevOps 工程](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-project-create.PNG)
![devops-project-create](/images/docs/devops-user-guide-zh/using-devops-zh/devops-project-management-zh/devops-project-create.png)
2. 输入 DevOps 工程的基本信息,然后点击**确定**。
2. 提供 DevOps 工程的基本信息,然后单击**确定**。
![输入基本信息](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/create-devops.PNG)
![create-devops](/images/docs/devops-user-guide-zh/using-devops-zh/devops-project-management-zh/create-devops.png)
- **名称**:此 DevOps 工程的简明名称,便于用户识别,例如 `demo-devops`
- **别名**:此 DevOps 工程的别名。
- **描述信息**:此 DevOps 工程的简要介绍。
- **集群设置**在当前版本中DevOps 工程无法同时跨多个集群运行。如果您已启用[多集群功能](../../../multicluster-management/),则必须选择一个集群来运行 DevOps 工程。
- **名称**: 此 DevOps 工程的简洁明了的名称,便于用户识别,例如 `demo-devops`
- **别名**: DevOps 工程的别名。
- **描述信息**: DevOps 工程的简要介绍。
- **集群设置**: 在当前版本中DevOps 工程无法同时跨多个集群运行。 如果启用了[多集群功能](../../../multicluster-management/),则必须选择运行 DevOps 工程的集群。
3. DevOps 工程创建后,会显示在下图所示的列表中。
3.创建后DevOps 工程将出现在下面的列表中。
![devops-list](/images/docs/devops-user-guide-zh/using-devops-zh/devops-project-management-zh/devops-list.png)
![DevOps 列表](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-list.PNG)
## 查看 DevOps 工程
单击刚刚创建的 DevOps 工程,转到其详细信息页面。 允许具有不同权限的租户在 DevOps 工程中执行各种任务,包括创建 CI/CD 流水线、凭据以及管理帐户和角色。
点击刚刚创建的 DevOps 工程,转到其详情页面。具有不同权限的租户可以在 DevOps 工程中执行各种任务,包括创建 CI/CD 流水线和凭证以及管理帐户和角色。
![devops-detail-page](/images/docs/devops-user-guide-zh/using-devops-zh/devops-project-management-zh/devops-detail-page.png)
![DevOps 详情页面](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/devops-detail-page.PNG)
### 流水线
流水线是一系列的插件集合可以通过组合它们来实现持续集成CI和持续交付CD的功能因此您的代码可以自动交付给任何目标。
流水线是一系列插件的集合,使您可以持续地测试和构建代码。流水线将持续集成 (CI) 和持续交付 (CD) 进行结合,提供精简的工作流,使您的代码可以自动交付给任何目标。
### 凭证
具有所需权限的 DevOps 工程用户可以为与外部环境进行交互的流水线配置凭据。 一旦用户在 DevOps 工程中添加了这些凭据DevOps 工程就可以使用凭据与第三方应用程序(例如 GitHubGitLab 和 Docker Hub进行交互。 有关更多信息,请参阅[凭据管理](../credential-management/)。
具有所需权限的 DevOps 工程用户可以为流水线配置凭证,以便与外部环境进行交互。用户在 DevOps 工程中添加凭证后DevOps 工程就可以使用这些凭证与第三方应用程序(例如 GitHub、GitLab 和 Docker Hub进行交互。有关更多信息请参见[凭证管理](../../how-to-use/credential-management/)。
### 成员和角色
与项目相似DevOps 工程还要求授予用户不同的角色,然后才能在 DevOps 工程中工作。 项目管理员(例如 `project-admin`)负责邀请租户并授予他们不同的角色。 有关更多信息,请参见[角色和成员管理](../role-and-member-management/)。
与项目相似DevOps 工程也需要为用户授予不同的角色,然后用户才能在 DevOps 工程中工作。工程管理员(例如 `project-admin`)负责邀请租户并授予他们不同的角色。有关更多信息,请参见[角色和成员管理](../role-and-member-management/)。
## 编辑或删除 DevOps 工程
1. 单击**工程管理**下的**基本信息**,您可以查看当前 DevOps 工程的概述,包括项目角色和成员的数量,工程名称和工程创建者。
1. 点击**工程管理**下的**基本信息**,您可以查看当前 DevOps 工程的概述,包括工程角色和工程成员的数量、工程名称和工程创建者。
2. 单击右侧的**工程管理**,您可以编辑 DevOps 工程的基本信息或删除 DevOps 工程。
2. 点击右侧的**工程管理**,您可以编辑此 DevOps 工程的基本信息或删除 DevOps 工程。
![project-basic-info](/images/docs/devops-user-guide-zh/using-devops-zh/devops-project-management-zh/project-basic-info.png)
![工程基本信息](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/devops-project-management/project-basic-info.PNG)


@ -1,86 +1,85 @@
---
title: "角色和成员管理"
keywords: 'Kubernetes, KubeSphere, DevOps, role, member'
keywords: 'Kubernetes, KubeSphere, DevOps, 角色, 成员'
description: '角色和成员管理'
linkTitle: "角色和成员管理"
weight: 11130
---
本教程演示如何在 DevOps 工程中管理角色和成员。 有关 KubeSphere 角色的更多信息,请参见角色管理概述。
本教程演示如何在 DevOps 工程中管理角色和成员。
在 DevOps 工程范围内,您可以向角色授予以下资源的权限:
- 流水线
- 凭证
- DevOps 设置
- DevOps 工程设置
- 访问控制
## 先决条件
## 准备工作
至少已创建一个 DevOps 工程,例如 `demo-devops` 此外,您需要在 DevOps 工程级别具有管理员角色的帐户(例如 `devops-admin`)。
至少已创建一个 DevOps 工程,例如 `demo-devops`此外,您需要一个在 DevOps 工程级别具有 `admin` 角色的帐户(例如 `devops-admin`)。
## 内置角色
在**工程角色**中,有三个可用的内置角色,如下所示。 内置角色是在创建 DevOps 工程时由 KubeSphere 自动创建的,并且无法对其进行编辑或删除
在**工程角色**中,有三个可用的内置角色,如下所示。创建 DevOps 工程时KubeSphere 会自动创建内置角色,并且无法编辑或删除这些角色
| 内置角色 | 描述信息 |
| 内置角色 | 描述信息 |
| ------------------ | ------------------------------------------------------------ |
| viewer | DevOps 工程观察者,可以查看 DevOps 工程下所有的资源。 |
| operator | DevOps 工程普通成员,可以在 DevOps 工程下创建流水线、凭证等。 |
| admin | DevOps 工程管理员,可以管理 DevOps 工程下所有的资源。 |
## 创建一个 DevOps 工程角色
## 创建 DevOps 工程角色
1. 以 `devops-admin` 身份登录控制台,然后在 **DevOps 工程**列表下选择一个 DevOps 项目(例如 `demo-devops`)。
1. 以 `devops-admin` 身份登录控制台,然后在 **DevOps 工程**列表下选择一个 DevOps 工程(例如 `demo-devops`)。
{{< notice note >}}
`devops-admin` 帐户为例。只要您使用的帐户被授予了一个角色包括在DevOps工程级别访问控制中的**工程观察者**、**工程管理员**和**工程维护者**的授权,它就可以创建 DevOps 项目角色。
本教程使用 `devops-admin` 帐户作为示例。只要您使用的帐户被授予的角色包含 DevOps 工程级别**访问控制**中的**成员查看**、**角色管理**和**角色查看**的权限,此帐户便可以创建 DevOps 工程角色。
{{</ notice >}}
{{</ notice >}}
2. 转到**工程管理**中的**工程角色**单击**创建**并设置**角色标识符**。 在此示例中,将创建一个名为 `pipeline-creator` 的角色。 单击**编辑权限**继续。
2. 转到**工程管理**中的**工程角色**点击**创建**并设置**角色标识符**。在本示例中,将创建一个名为 `pipeline-creator` 的角色。点击**编辑权限**继续。
![Create a devops project role](/images/docs/devops-admin-zh/devops_role_step1.png)
![创建角色](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step1.PNG)
3. 在**流水线管理**中,选择您希望授予该角色用户所拥有的权限。 例如,为此角色选择了**流水线管理**和**流水线查看**。 单击**确定**完成
3. 在**流水线管理**中,选择您希望授予该角色的权限。例如,为此角色选择了**流水线管理**和**流水线查看**。点击**确定**完成操作
![Edit Authorization](/images/docs/devops-admin-zh/devops_role_step2.png)
![分配角色](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-step2.PNG)
{{< notice note >}}
{{< notice note >}}
**依赖于**表示首先需要选择主要授权(**依赖于**之后列出的),以便可以分配关联授权。
{{</ notice >}}
{{</ notice >}}
4. 新创建的角色将列在**工程角色**中。 您可以单击右侧的三个点对其进行编辑。
4. 新创建的角色将列在**工程角色**中。您可以点击右侧的三个点对其进行编辑。
![Edit Roles](/images/docs/devops-admin-zh/devops_role_list.png)
![角色列表](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-role-list.PNG)
{{< notice note >}}
{{< notice note >}}
仅授予 `pipeline-creator` 角色流水线管理和查看权限可能无法满足您的实际需求。本示例仅用于演示目的。您可以根据实际需要自定义角色权限
`pipeline-creator` 角色仅被授予**流水线管理****流水线查看**权限可能无法满足您的实际需求。本示例仅用于演示,您可以根据实际需要创建自定义角色
{{</ notice >}}
{{</ notice >}}
## 邀请新成员
1. 在**工程管理**中选择**工程成员**,然后单击**邀请成员**。
1. 在**工程管理**中选择**工程成员**,然后点击**邀请成员**。
2. 邀请用户加入 DevOps 项目。向用户授予 `pipeline-creator`角色。
2. 邀请用户加入此 DevOps 工程,并向此用户授予 `pipeline-creator` 角色。
![invite member](/images/docs/devops-admin-zh/devops_invite_member.png)
![邀请成员](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-invite-member.PNG)
{{< notice note >}}
{{< notice note >}}
首先必须先邀请用户加入 DevOps 工程所在的企业空间。
必须先邀请用户加入 DevOps 工程所在的企业空间。
{{</ notice >}}
{{</ notice >}}
3. 将用户添加到 DevOps 工程后,单击**确定** 在**工程成员**中,您可以看到列出了新邀请的成员。
3. 点击**确定**将用户添加到 DevOps 工程。在**工程成员**中,您可以看到列出了新邀请的成员。
![list member](/images/docs/devops-admin-zh/devops_list_member.png)
4. 您还可以通过编辑现有成员来更改其角色或将其从 DevOps 工程中删除。
4. 您还可以编辑现有成员以更改其角色,或将其从 DevOps 工程中删除。
![编辑成员](/images/docs/zh-cn/devops-user-guide/understand-and-manage-devops-projects/role-and-member-management/devops-user-edit.PNG)
![edit member role](/images/docs/devops-admin-zh/devops_user_edit.png)


@ -0,0 +1,131 @@
---
title: "帐号无法登录"
keywords: "无法登录, account is not active, KubeSphere, Kubernetes"
description: "如何解决无法登录的问题"
linkTitle: "帐号无法登录"
Weight: 16440
---
KubeSphere 安装时会自动创建默认帐户 `admin/P@88w0rd`ks-controller-manager 将用户状态同步到 openldap、Jenkins 之后会加密帐户密码;在此之后,帐户状态会被转换为 Active帐户才可以正常登录。
下面是帐户无法登录时,一些常见的问题:
## account not active
![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/account-not-active.png)
您可以通过以下命令来检查帐户状态:
```
$ kubectl get users
NAME EMAIL STATUS
admin admin@kubesphere.io Active
```
检查 ks-controller-manager 是否正常运行,是否有异常日志:
```
kubectl -n kubesphere-system logs -l app=ks-controller-manager
```
### K8s 1.19 中 admission webhook 无法正常工作
K8s 1.19 使用 Golang 1.15 进行编译,需要更新 admission webhook 所用的证书,否则会导致 ks-controller-manager 的 admission webhook 无法正常使用。
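排查前,可以先确认集群版本是否为 1.19(示例命令):

```
kubectl version --short
```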
相关错误日志:
> Internal error occurred: failed calling webhook "validating-user.kubesphere.io": Post "https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=30s": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0
相关 issue 和解决方式https://github.com/kubesphere/kubesphere/issues/2928
### ks-controller-manager 无法正常工作
ks-controller-manager 依赖 openldap、Jenkins 这两个有状态服务,当 openldap 或 Jenkins 无法正常运行时,会导致 ks-controller-manager 一直处于 reconcile 状态。
可以通过以下命令检查 openldap 和 Jenkins 服务是否正常:
```
kubectl -n kubesphere-devops-system get po | grep -v Running
kubectl -n kubesphere-system get po | grep -v Running
kubectl -n kubesphere-system logs -l app=openldap
```
相关错误日志:
> failed to connect to ldap service, please check ldap status, error: factory is not able to fill the pool: LDAP Result Code 200 \"Network Error\": dial tcp: lookup openldap.kubesphere-system.svc on 169.254.25.10:53: no such host
> Internal error occurred: failed calling webhook “validating-user.kubesphere.io”: Post https://ks-controller-manager.kubesphere-system.svc:443/validate-email-iam-kubesphere-io-v1alpha2-user?timeout=4s: context deadline exceeded
**解决方式**
您需要先恢复 openldap、Jenkins 这两个服务并保证网络的连通性,重启 ks-controller-manager 后会立即触发 reconcile当无法连接到 openldap 或 Jenkins 时重试间隔会递增)。
```
kubectl -n kubesphere-system rollout restart deploy ks-controller-manager
```
### 使用了错误的代码分支
如果您使用了错误的 ks-installer 版本,会导致安装之后各组件版本不匹配。
通过以下方式检查各组件版本是否一致,正确的 image tag 应该是 v3.0.0。
```
kubectl -n kubesphere-system get deploy ks-installer -o jsonpath='{.spec.template.spec.containers[0].image}'
kubectl -n kubesphere-system get deploy ks-apiserver -o jsonpath='{.spec.template.spec.containers[0].image}'
kubectl -n kubesphere-system get deploy ks-controller-manager -o jsonpath='{.spec.template.spec.containers[0].image}'
```
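若版本一致,预期可以看到类似如下的镜像(示例输出,镜像仓库前缀可能因安装配置而异):

```
kubesphere/ks-installer:v3.0.0
kubesphere/ks-apiserver:v3.0.0
kubesphere/ks-controller-manager:v3.0.0
```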
## 帐号或密码错误
![account-not-active](/images/docs/faq/access-control-and-account-management/cannot-login/wrong-password.png)
ks-console 和 ks-apiserver 需要借助 Redis 在多个副本之间共享数据,当 Redis 服务异常时,会导致 ks-console 多个副本之间无法共享密码加密传输时使用的 salt。
通过以下命令检查帐号密码是否正确:
```
curl -u <USERNAME>:<PASSWORD> "http://`kubectl -n kubesphere-system get svc ks-apiserver -o jsonpath='{.spec.clusterIP}'`/api/v1/nodes"
```
### Redis 异常
您可以通过以下命令检查 Redis 服务是否正常:
```
kubectl -n kubesphere-system logs -l app=ks-console
kubectl -n kubesphere-system get po | grep -v Running
# High Availability
kubectl -n kubesphere-system exec -it redis-ha-server-0 redis-cli info replication
kubectl -n kubesphere-system exec -it redis-ha-server-0 -- sh -c 'for i in `seq 0 2`; do nc -vz redis-ha-server-$i.redis-ha.kubesphere-system.svc 6379; done'
kubectl -n kubesphere-system logs -l app=redis-ha-haproxy
kubectl -n kubesphere-system logs -l app=redis-ha
# Single Replica
kubectl -n kubesphere-system logs -l app=redis
```
相关错误日志:
> 1344:C 17 Sep 2020 17:13:18.099 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle
1:M 17 Sep 2020 17:13:18.198 # Background saving error
1:M 17 Sep 2020 17:13:24.014 * 1 changes in 3600 seconds. Saving...
1:M 17 Sep 2020 17:13:24.015 * Background saving started by pid 1345
1345:C 17 Sep 2020 17:13:24.016 # Failed opening the RDB file dump.rdb (in server root dir /data) for saving: Stale file handle
1:M 17 Sep 2020 17:13:24.115 # Background saving error
> E0909 07:05:22.770468 1 redis.go:51] unable to reach redis host EOF
> [WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R0 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R1 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[WARNING] 252/094143 (6) : Server check_if_redis_is_master_0/R2 is DOWN, reason: Layer7 timeout, info: " at step 5 of tcp-check (expect string '10.223.2.232')", check duration: 1000ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[ALERT] 252/094143 (6) : backend 'check_if_redis_is_master_0' has no server available!
**解决方式**
您需要先恢复 Redis 服务,保证其正常运行并且 Pod 之间网络可以正常连通;随后您可以重启 ks-console 以立即同步副本之间的数据。
```
kubectl -n kubesphere-system rollout restart deploy ks-console
```


@ -1,28 +1,28 @@
---
title: "集成您自己的 Prometheus"
keywords: "Monitoring, Prometheus, node-exporter, kube-state-metrics, KubeSphere, Kubernetes"
description: "Use your own Prometheus stack for KubeSphere monitoring"
keywords: "监控, Prometheus, node-exporter, kube-state-metrics, KubeSphere, Kubernetes"
description: "将您自己的 Prometheus 堆栈用于 KubeSphere 监控"
linkTitle: "集成您自己的 Prometheus"
Weight: 16330
---
KubeSphere comes with several pre-installed customized monitoring components including Prometheus Operator, Prometheus, Alertmanager, Grafana (Optional), various ServiceMonitors, node-exporter, and kube-state-metrics. These components might already exist before you install KubeSphere. It is possible to use your own Prometheus stack setup in KubeSphere v3.0.0 .
KubeSphere 自带一些预装的自定义监控组件,包括 Prometheus Operator、Prometheus、Alertmanager、Grafana可选、各种 ServiceMonitor、node-exporter 和 kube-state-metrics。在您安装 KubeSphere 之前,这些组件可能已经存在。在 KubeSphere 3.0 中,您可以使用自己的 Prometheus 堆栈设置。
## Steps to Bring Your Own Prometheus
## 集成您自己的 Prometheus 的步骤
To use your own Prometheus stack setup, the steps are listed as below:
要使用您自己的 Prometheus 堆栈设置,请执行以下步骤:
1. Uninstall the customized Prometheus stack of KubeSphere
1. 卸载 KubeSphere 的自定义 Prometheus 堆栈
2. Install your own Prometheus stack
2. 安装您自己的 Prometheus 堆栈
3. Install KubeSphere customized stuff to your Prometheus stack
3. 将 KubeSphere 自定义组件安装至您的 Prometheus 堆栈
4. Change KubeSphere's `monitoring endpoint`
4. 更改 KubeSphere 的 `monitoring endpoint`
### Step 1. Uninstall the customized Prometheus stack of KubeSphere
### 步骤 1卸载 KubeSphere 的自定义 Prometheus 堆栈
1. Execute the following commands to uninstall the stack:
1. 执行以下命令,卸载堆栈:
```bash
kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/alertmanager/ 2>/dev/null
@ -38,17 +38,17 @@ To use your own Prometheus stack setup, the steps are listed as below:
kubectl -n kubesphere-system exec $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -- kubectl delete -f /kubesphere/kubesphere/prometheus/init/ 2>/dev/null
```
2. Delete the PVC that Prometheus used.
2. 删除 Prometheus 使用的 PVC。
```bash
kubectl -n kubesphere-monitoring-system delete pvc `kubectl -n kubesphere-monitoring-system get pvc | grep -v VOLUME | awk '{print $1}' | tr '\n' ' '`
```
### Step 2. Install your own Prometheus stack
### 步骤 2安装您自己的 Prometheus 堆栈
{{< notice note >}}
KubeSphere 3.0.0 was certified to work well with the following Prometheus stack components:
KubeSphere 3.0 已经过认证,可以与以下 Prometheus 堆栈组件搭配使用:
- Prometheus Operator **v0.38.3+**
- Prometheus **v2.20.1+**
@ -56,100 +56,100 @@ KubeSphere 3.0.0 was certified to work well with the following Prometheus stack
- kube-state-metrics **v1.9.6**
- node-exporter **v0.18.1**
Make sure your Prometheus stack components' version meets these version requirements especially **node-exporter** and **kube-state-metrics**.
请确保您的 Prometheus 堆栈组件版本符合上述版本要求,尤其是 **node-exporter****kube-state-metrics**
Make sure you install **node-exporter** and **kube-state-metrics** if only **Prometheus Operator** and **Prometheus** were installed. **node-exporter** and **kube-state-metrics** are required for KubeSphere to work properly.
如果只安装了 **Prometheus Operator****Prometheus**,请您务必安装 **node-exporter****kube-state-metrics**。**node-exporter** 和 **kube-state-metrics** 是 KubeSphere 正常运行的必要条件。
**If you've already had the entire Prometheus stack up and running, you can skip this step.**
**如果整个 Prometheus 堆栈已经启动并运行,您可以跳过此步骤。**
{{</ notice >}}
The Prometheus stack can be installed in many ways. The following steps show how to install it into the namespace `monitoring` using **upstream `kube-prometheus`**.
Prometheus 堆栈可以通过多种方式进行安装。下面的步骤演示如何使用**上游 `kube-prometheus`** 将 Prometheus 堆栈安装至命名空间 `monitoring` 中。
1. Get kube-prometheus version v0.6.0 whose node-exporter's version v0.18.1 matches the one KubeSphere v3.0.0 is using.
1. 获取 v0.6.0 版 kube-prometheus它的 node-exporter 版本为 v0.18.1,与 KubeSphere 3.0 所使用的版本相匹配。
```bash
cd ~ && git clone https://github.com/prometheus-operator/kube-prometheus.git && cd kube-prometheus && git checkout tags/v0.6.0 -b v0.6.0
```
2. Setup the `monitoring` namespace, and install Prometheus Operator and corresponding roles:
2. 设置命名空间 `monitoring`,安装 Prometheus Operator 和相应角色:
```bash
kubectl apply -f manifests/setup/
```
3. Wait until Prometheus Operator is up and running.
3. 稍等片刻待 Prometheus Operator 启动并运行。
```bash
kubectl -n monitoring get pod --watch
```
4. Remove unnecessary components such as Prometheus Adapter.
4. 移除不必要组件,例如 Prometheus Adapter。
```bash
rm -rf manifests/prometheus-adapter-*.yaml
```
5. Change kube-state-metrics to the same version v1.9.6 as KubeSphere v3.0.0 is using.
5. 将 kube-state-metrics 的版本变更为 KubeSphere 3.0 所使用的 v1.9.6。
```bash
sed -i 's/v1.9.5/v1.9.6/g' manifests/kube-state-metrics-deployment.yaml
```
6. Install Prometheus, Alertmanager, Grafana, kube-state-metrics, and node-exporter. You can only install kube-state-metrics or node-exporter by only applying the yaml file `kube-state-metrics-*.yaml` or `node-exporter-*.yaml`.
6. 安装 Prometheus、Alertmanager、Grafana、kube-state-metrics 以及 node-exporter。您可以只应用 YAML 文件 `kube-state-metrics-*.yaml``node-exporter-*.yaml` 来分别安装 kube-state-metrics 或 node-exporter。
```bash
kubectl apply -f manifests/
```
### Step 3. Install KubeSphere customized stuff to your Prometheus stack
### 步骤 3将 KubeSphere 自定义组件安装至您的 Prometheus 堆栈
{{< notice note >}}
KubeSphere 3.0.0 uses Prometheus Operator to manage Prometheus/Alertmanager config and lifecycle, ServiceMonitor (to manage scrape config), and PrometheusRule (to manage Prometheus recording/alert rules).
KubeSphere 3.0 使用 Prometheus Operator 来管理 Prometheus/Alertmanager 配置和生命周期、ServiceMonitor用于管理抓取配置和 PrometheusRule用于管理 Prometheus 记录/告警规则)。
There are a few items listed in [KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml), among which `prometheus-rules.yaml` and `prometheus-rulesEtcd.yaml` are required for KubeSphere v3.0.0 to work properly and others are optional. You can remove `alertmanager-secret.yaml` if you don't want your existing Alertmanager's config to be overwritten. You can remove `xxx-serviceMonitor.yaml` if you don't want your own ServiceMonitors to be overwritten (KubeSphere customized ServiceMonitors discard many irrelevant metrics to make sure Prometheus only stores the most useful metrics).
[KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml) 中列出了一些条目,其中 `prometheus-rules.yaml` 和 `prometheus-rulesEtcd.yaml` 是 KubeSphere 3.0 正常运行的必要条件,其他均为可选。如果您不希望现有 Alertmanager 的配置被覆盖,您可以移除 `alertmanager-secret.yaml`。如果您不希望自己的 ServiceMonitor 被覆盖KubeSphere 自定义的 ServiceMonitor 会丢弃许多无关指标,以便 Prometheus 只存储最有用的指标),您可以移除 `xxx-serviceMonitor.yaml`。
If your Prometheus stack setup isn't managed by Prometheus Operator, you can skip this step. But you have to make sure that:
如果您的 Prometheus 堆栈不是由 Prometheus Operator 进行管理,您可以跳过此步骤。但请务必确保:
- You must copy the recording/alerting rules in [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) and [PrometheusRule for ETCD](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) to your Prometheus config for KubeSphere v3.0.0 to work properly.
- 您必须将 [PrometheusRule](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rules.yaml) [PrometheusRule for ETCD](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/prometheus-rulesEtcd.yaml) 中的记录/告警规则复制至您的 Prometheus 配置中,以便 KubeSphere 3.0 能够正常运行。
- Configure your Prometheus to scrape metrics from the same targets as the ServiceMonitors listed in [KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml).
- 配置您的 Prometheus使其抓取指标的目标 (Target) 与 [KubeSphere kustomization](https://github.com/kubesphere/kube-prometheus/blob/ks-v3.0/kustomize/kustomization.yaml) 中列出的 ServiceMonitor 的目标相同。
{{</ notice >}}
1. Get KubeSphere v3.0.0 customized kube-prometheus.
1. 获取 KubeSphere 3.0 的自定义 kube-prometheus。
```bash
cd ~ && mkdir kubesphere && cd kubesphere && git clone https://github.com/kubesphere/kube-prometheus.git && cd kube-prometheus/kustomize
```
2. Change the namespace to your own in which the Prometheus stack is deployed. For example, it is `monitoring` if you install Prometheus in the `monitoring` namespace following Step 2.
2. 将命名空间更改为您自己部署 Prometheus 堆栈的命名空间。例如,如果您按照步骤 2 将 Prometheus 安装在命名空间 `monitoring` 中,这里即为 `monitoring`
```bash
sed -i 's/my-namespace/<your own namespace>/g' kustomization.yaml
```
3. Apply KubeSphere customized stuff including Prometheus rules, Alertmanager config, and various ServiceMonitors.
3. 应用 KubeSphere 自定义组件,包括 Prometheus 规则、Alertmanager 配置和各种 ServiceMonitor 等。
```bash
kubectl apply -k .
```
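应用完成后,可以确认相关自定义资源已经创建(示例命令,命名空间为您部署 Prometheus 堆栈的命名空间):

```bash
kubectl -n <your own namespace> get prometheusrules,servicemonitors
```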
4. Setup Services for kube-scheduler and kube-controller-manager metrics exposure.
4. 配置服务 (Service) 用于暴露 kube-scheduler 和 kube-controller-manager 指标。
```bash
kubectl apply -f ./prometheus-serviceKubeScheduler.yaml
kubectl apply -f ./prometheus-serviceKubeControllerManager.yaml
```
5. Find the Prometheus CR which is usually Kubernetes in your own namespace.
5. 在您自己的命名空间中查找 Prometheus CR通常为 Kubernetes。
```bash
kubectl -n <your own namespace> get prometheus
```
6. Set the Prometheus rule evaluation interval to 1m to be consistent with the KubeSphere v3.0.0 customized ServiceMonitor. The Rule evaluation interval should be greater or equal to the scrape interval.
6. 将 Prometheus 规则评估间隔设置为 1m与 KubeSphere 3.0 的自定义 ServiceMonitor 保持一致。规则评估间隔应大于或等于抓取间隔。
```bash
kubectl -n <your own namespace> patch prometheus k8s --patch '{
@ -159,31 +159,31 @@ If your Prometheus stack setup isn't managed by Prometheus Operator, you can ski
}' --type=merge
```
### Step 4. Change KubeSphere's `monitoring endpoint`
### 步骤 4更改 KubeSphere 的 `monitoring endpoint`
Now that your own Prometheus stack is up and running, you can change KubeSphere's monitoring endpoint to use your own Prometheus.
您自己的 Prometheus 堆栈现在已启动并运行,您可以更改 KubeSphere 的监控 Endpoint 来使用您自己的 Prometheus。
1. Edit `kubesphere-config` by running the following command:
1. 运行以下命令,编辑 `kubesphere-config`
```bash
kubectl edit cm -n kubesphere-system kubesphere-config
```
2. Navigate to the `monitoring endpoint` section as below:
2. 搜寻到 `monitoring endpoint` 部分,如下所示:
```bash
monitoring:
endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
```
3. Change `monitoring endpoint` to your own Prometheus:
3. `monitoring endpoint` 更改为您自己的 Prometheus
```bash
monitoring:
endpoint: http://prometheus-operated.monitoring.svc:9090
```
4. Run the following command to restart the KubeSphere APIServer.
4. 运行以下命令,重启 KubeSphere APIserver。
```bash
kubectl -n kubesphere-system rollout restart deployment/ks-apiserver
@ -191,6 +191,6 @@ Now that your own Prometheus stack is up and running, you can change KubeSphere'
{{< notice warning >}}
If you enable/disable KubeSphere pluggable components following [this guide](https://kubesphere.io/docs/pluggable-components/overview/) , the `monitoring endpoint` will be reset to the original one. In this case, you have to change it to the new one and then restart the KubeSphere APIServer again.
如果您按照[此指南](../../../pluggable-components/overview/)启用/禁用 KubeSphere 可插拔组件,`monitoring endpoint` 会重置为初始值。此时,您需要再次将其更改为您自己的 Prometheus 并重启 KubeSphere APIserver。
{{</ notice >}}


@ -11,7 +11,7 @@ weight: 3110
作为 [GitHub](https://github.com/kubesphere) 上的开源项目KubeSphere 是一个有成千上万的社区用户的聚集地,他们中的许多人已经把 KubeSphere 运行在生产环境中。
KubeSphere 有多种安装方式,请注意,这些安装方式不是互斥的。例如,您可以在离线环境中的多个节点上以最小化方式部署 KubeSphere。
KubeSphere 有多种安装方式,请注意,这些安装方式不是互斥的。例如,您可以在离线环境中的多个节点上以最小化方式部署 KubeSphere。
- [All-in-One](../../../quick-start/all-in-one-on-linux/):在单个节点上安装 KubeSphere仅用于用户快速熟悉 KubeSphere。
- [多节点安装](../multioverview/):在多个节点上安装单 master 的 KubeSphere用于测试或开发。


@ -227,7 +227,7 @@ spec:
`internalAddress`:实例的私有 IP 地址。
- 在本教程中,端口 22 是 SSH 的默认端口,因此您无需将它添加至 YAML 文件中。否则,您需要在 IP 地址后添加对应端口号。例如:
- 在本教程中,端口 22 是 SSH 的默认端口,因此您无需将它添加至 YAML 文件中。否则,您需要在 IP 地址后添加对应端口号。例如:
```yaml
hosts:


@ -1,92 +1,92 @@
---
title: "Advantages"
keywords: "KubeSphere, Kubernetes, Advantages"
description: "KubeSphere Advantages"
title: "为什么选择 KubeSphere"
keywords: "KubeSphere, Kubernetes, 优势"
description: "KubeSphere 优势"
linkTitle: "为什么选择 KubeSphere"
weight: 1600
---
## Vision
## 设计愿景
Kubernetes has become the de facto standard for deploying containerized applications at scale in private, public and hybrid cloud environments. However, many people can easily get confused when they start to use Kubernetes as it is complicated and has many additional components to manage. Some components need to be installed and deployed by users themselves, such as storage and network services. At present, Kubernetes only provides open-source solutions or projects, which can be difficult to install, maintain and operate to some extent. For users, it is not always easy to quickly get started as they are faced with a steep learning curve.
Kubernetes 已经成为在私有云、公有云和混合云等环境中大规模部署容器化应用程序的事实标准。然而,很多人使用 Kubernetes 仍会不知所措,因为 Kubernetes 本身使用复杂,需要管理的组件繁多,部分组件还需要自行安装和部署,比如存储和网络部分。目前Kubernetes 仅提供开源的解决方案或项目,可能在某种程度上难以安装、维护和操作。对于用户而言,学习成本和门槛都很高,快速上手并不是一件易事。
KubeSphere is designed to reduce or eliminate many Kubernetes headaches related to building, deployment, management, observability and so on. It provides comprehensive services and automates provisioning, scaling and management of applications so that you can focus on code writing. More specifically, KubeSphere boasts an extensive portfolio of features including multi-cluster management, application lifecycle management, multi-tenant management, CI/CD pipelines, service mesh, and observability (monitoring, logging, alerting, auditing, events and notification).
KubeSphere 旨在解决 Kubernetes 在构建、部署、管理和可观察性等方面的痛点,提供全面的服务和自动化的应用供应、伸缩和管理,让您专注于代码编写。具体来说KubeSphere 包含多种功能如多集群管理、应用程序生命周期管理、多租户管理、CI/CD 流水线、微服务治理和可观察性(监控日志、告警通知和审计事件)等。
As a comprehensive open-source platform, KubeSphere strives to make the container platform more user-friendly and powerful. With a highly responsive web console, KubeSphere provides a graphic interface for developing, testing and operating, which can be easily accessed in a browser. For users who are accustomed to command-line tools, they can quickly get familiar with KubeSphere as kubectl is also integrated in the fully-functioning web console. With the responsive UI design, users can create, modify and create their apps and resources with a minimal learning curve.
作为一个综合性的开源平台KubeSphere 致力于提供更加友好的用户体验和更强大的操作功能。例如KubeSphere 的交互式 Web 控制台方便用户直接在平台上进行测试和操作,同时还内置了命令行工具 kubectl让习惯使用命令行操作的用户也能快速上手以最低的学习成本轻松地在平台上创建和修改各类资源。
In addition, KubeSphere offers excellent solutions to storage and network. Apart from the major open-source storage solutions such as Ceph RBD and GlusterFS, users are also provided with [QingCloud Block Storage](https://docs.qingcloud.com/product/storage/volume/) and [QingStor NeonSAN](https://docs.qingcloud.com/product/storage/volume/super_high_performance_shared_volume/), developed by QingCloud for persistent storage. With the integrated QingCloud CSI and NeonSAN CSI plugins, enterprises can enjoy a more stable and secure services of their apps and data.
此外KubeSphere 在存储和网络方面提供了最优的解决方案,比如存储除了支持流行的开源共享存储如 Ceph RBD 和 GlusterFS 之外,还提供[青云QingCloud 云平台块存储](https://docs.qingcloud.com/product/storage/volume/)和青云QingCloud 自研的[分布式存储 QingStor NeonSAN](https://docs.qingcloud.com/product/storage/volume/super_high_performance_shared_volume/) 作为 Kubernetes 的持久化存储,通过集成的 QingCloud CSI 和 NeonSAN CSI 插件即可使用青云QingCloud 提供的高性能块存储或 NeonSAN 作为存储卷挂载至工作负载,为企业应用和数据提供更稳定安全的存储服务。
## Why KubeSphere
## 为什么选择 KubeSphere
KubeSphere provides high-performance and scalable container service management for enterprises. It aims to help them accomplish digital transformation driven by cutting-edge technologies, and accelerate app iteration and business delivery to meet the ever-changing needs of enterprises.
KubeSphere 为企业用户提供高性能可伸缩的容器应用管理服务,旨在帮助企业完成新一代互联网技术驱动下的数字化转型,加速应用的快速迭代与业务交付,以满足企业日新月异的业务需求。
Here are the six major advantages that make KubeSphere stand out among its counterparts.
以下是 KubeSphere 的六大主要优势。
### Unified Management of Clusters across Cloud Providers
### 跨云厂商的多集群统一管理
As container usage ramps up, enterprises are faced with increased complexity of cluster management as they deploy clusters across cloud and on-premises environments. To address the urgent need of users for a uniform platform to manage heterogeneous clusters, KubeSphere sees a major feature enhancement with substantial benefits. Users can leverage KubeSphere to manage, monitor, import, operate and retire clusters across regions, clouds and environments.
随着容器应用的日渐普及,各个企业跨云或在本地环境中部署多个集群,而集群管理的复杂程度也在不断增加。为满足用户统一管理多个异构集群的需求KubeSphere 配备了全新的多集群管理功能,帮助用户跨区、跨云等多个环境管理、监控、导入和运维多个集群,全面提升用户体验。
The feature can be enabled both before and after the installation, giving users great flexibility as they make their own decisions to use KubeSphere for their specific issues. In particular, it features:
多集群功能可在安装 KubeSphere 之前或之后启用。具体来说,该功能有两大特性:
**Unified Management**. Users can import Kubernetes clusters either through direct connection or with an agent. With simple configurations, the process can be done within minutes in the interactive console. Once clusters are imported, users are able to monitor the status and operate on cluster resources in a unified way.
**统一管理**:用户可以使用直接连接或间接连接导入 Kubernetes 集群。只需简单配置,即可在数分钟内在 KubeSphere 的互动式 Web 控制台上完成整个流程。集群导入后,用户可以通过统一的中央控制平面监控集群状态、运维集群资源。
**High Availability**. This is extremely useful when it comes to disaster recovery. A cluster can run major services with another one serving as the backup. When the major one goes down, services can be quickly taken over by another cluster. The logic is quite similar to the case when clusters are deployed in different regions, as requests can be sent to the closest one for low latency. In short, high availability is achieved across zones and clusters.
**高可用**:在多集群架构中,一个集群可以运行主要服务,与此同时由另一集群作为备用。一旦该主集群宕机,备用集群可以迅速接管相关服务。此外,当集群跨区域部署时,为最大限度地减少延迟,请求可以发送至距离最近的集群,由此实现跨区跨集群的高可用。
For more information, see [Multi-cluster Management](../../multicluster-management/).
有关更多信息,请参见[多集群管理](../../multicluster-management/)。
### Powerful Observability
### 强大的可观察性功能
The observability feature of KubeSphere has been greatly improved with key building blocks enhanced, including monitoring, logging, auditing, events, alerting and notification. The highly functional system allows users to observe virtually everything that happens in the platform. It has much to offer for users with distinct advantages listed as below:
KubeSphere 的可观察性功能在 v3.0 中全面升级,进一步优化与改善了其中的重要组件,包括监控日志、审计事件以及告警通知。用户可以借助 KubeSphere 强大的监控系统查看平台中的各类数据,该系统主要的优势包括:
**Customized**. Users are allowed to customize their own monitoring dashboard with multiple display forms available. They can set their own templates based on their needs, add the metric they want to monitor and even choose the display color they prefer. Alerting policies and rules can all be customized as well, including repetition interval, time and threshold.
**自定义配置**:用户可以为应用自定义监控面板,有多种模板和图表模式可供选择。用户可按需添加想要监控的指标,甚至选择指标在图表上所显示的颜色。此外,也可自定义告警策略与规则,包括告警间隔、次数和阈值等。
**Diversified**. Ops teams are freed from the complicated work of recording massive data as KubeSphere monitors resources from virtually all dimensions. It also features an efficient notification system with diversified channels for users to choose from.
**全维度数据监控与查询**KubeSphere 提供全维度的资源监控数据,将运维团队从繁杂的数据记录工作中彻底解放,同时配备了高效的通知系统,支持多种通知渠道包括电子邮件、Slack 与企业微信等。基于 KubeSphere 的多租户管理体系,不同租户可以在控制台上查询对应的监控日志与审计事件,支持关键词过滤、模糊匹配和精确匹配。
**Visualized and Interactive**. KubeSphere presents users with a graphic web console, especially for the monitoring of different resources. They are displayed in highly interactive graphs that give users a clear view of what is happening inside a cluster. Resources at different levels can also be sorted based on their usage, which is convenient for users to compare for further data analysis.
**图形化交互式界面设计**KubeSphere 为用户提供图形化 Web 控制台,便于从不同维度监控各个资源。资源的监控数据会显示在交互式图表上,详细记录集群中的资源用量情况。不同级别的资源可以根据用量进行排序,方便用户对数据进行对比与分析。
**Accurate**. The entire monitoring system functions at second-level precision that allow users to quickly locate any component failures. In terms of events and auditing, all activities are accurately recorded for future reference.
**高精度秒级监控**:整个监控系统提供秒级监控数据,帮助用户快速定位组件异常。此外,所有审计事件均会准确记录在 KubeSphere 中,便于后续数据分析。
For more information, see Project Administration and Usage.
有关更多信息,请参见[集群管理](../../cluster-administration/)、[项目用户指南](../../project-user-guide/)和[工具箱](../../toolbox/)。
### Automated DevOps
### 自动化 DevOps 流程
Automation represents a key part of implementing DevOps. With automatic, streamlined pipelines in place, users are better positioned to distribute apps in terms of continuous delivery and integration.
自动化是落地 DevOps 的重要组成部分,自动、精简的流水线为用户通过 CI/CD 流程交付应用提供了良好的条件。
**Jenkins-powered**. KubeSphere DevOps system is built with Jenkins as the engine, which is abundant in plugins. On top of that, Jenkins provides an enabling environment for extension development, making it possible for the DevOps team to work smoothly across the whole process (developing, testing, building, deploying, monitoring, logging, notifying, etc.) in a unified platform. The KubeSphere account can also be used for the built-in Jenkins, meeting the demand of enterprises for multi-tenant isolation of CI/CD pipelines and unified authentication.
**集成 Jenkins**KubeSphere DevOps 系统内置了 Jenkins 作为引擎,支持多种第三方插件。此外Jenkins 为扩展开发提供了良好的环境DevOps 团队的整个工作流程可以在统一的平台上无缝对接包括开发测试、构建部署、监控日志和通知等。KubeSphere 的帐户可以用于登录内置的 Jenkins满足企业对于 CI/CD 流水线和统一认证多租户隔离的需求。
**Convenient built-in tools**. Users can easily take advantage of automation tools (e.g. Binary-to-Image and Source-to-Image) even without a thorough understanding of how Docker or Kubernetes works. They only need to submit a registry address or upload binary files (e.g. JAR/WAR/Binary). Ultimately, services will be released to Kubernetes automatically without any coding in a Dockerfile.
**便捷的内置工具**:无需对 Docker 或 Kubernetes 的底层运作原理有深刻的了解,用户即可快速上手自动化工具,包括 Binary-to-Image 和 Source-to-Image。只需定义镜像仓库地址或上传二进制文件例如 JAR/WAR/Binary即可将对应的服务自动发布至 Kubernetes无需编写 Dockerfile。
For more information, see DevOps Administration.
有关更多信息,请参见 [DevOps 用户指南](../../devops-user-guide/)。
### Fine-grained Access Control
### 细粒度权限控制
KubeSphere users are allowed to implement fine-grained access control across different levels, including clusters, workspaces and projects. Users with specific roles can operate on different resources if they are authorized to do so.
KubeSphere 为用户提供不同级别的权限控制,包括集群、企业空间和项目。拥有特定角色的用户可以操作对应的资源。
**Self-defined**. Apart from system roles, KubeSphere empowers users to define their roles with a spectrum of operations that they can assign to tenants. This meets the need of enterprises for detailed task allocation as they can decide who should be responsible for what while not being affected by irrelevant resources.
**自定义角色**除了系统内置的角色外KubeSphere 还支持自定义角色,用户可以给角色分配不同的权限以执行不同的操作,以满足企业对不同租户具体工作分配的要求,即可以定义每个租户所应该负责的部分,不被无关资源所影响。
**Secure**. As tenants at different levels are completely isolated from each other, they can share resources while not affecting one another. The network can also be completely isolated to ensure data security.
**安全**:由于不同级别的租户之间完全隔离,他们在共享部分资源的同时不会相互影响。租户之间的网络也完全隔离,确保数据安全。
For more information, see Role and Member Management in Workspace.
有关更多信息,请参见[企业空间](../../workspace-administration/role-and-member-management/)和[项目](../../project-administration/role-and-member-management/)中的角色和成员管理。
### Out-of-Box Microservices Governance
### 开箱即用的微服务治理
Built on Istio, KubeSphere features the major grayscale release strategies. All of these features work out of the box, which means consistent user experiences without any code hacking. Traffic control, for example, plays an essential role in microservices governance: Ops teams in particular can implement operational patterns (e.g. circuit breaking) to compensate for poorly behaving services. Here are two major reasons to use microservices governance, or service mesh, in KubeSphere:
KubeSphere 的微服务治理功能基于 Istio提供多个灰度策略。所有的功能均开箱即用支持无侵入 (Hack) 的微服务治理,提供一致的用户体验。以下是 KubeSphere 微服务治理(服务网格)的两大优势:
- **Comprehensive**. KubeSphere provides users with a well-diversified portfolio of solutions to traffic management, including canary release, blue-green deployment, traffic mirroring and circuit breaking. In addition, the distributed tracing feature also helps users monitor apps, locate failures, and improve performance.
- **Visualized**. With a highly responsive web console, KubeSphere allows users to view how microservices interconnect with each other in a straightforward way.
- **全面的微服务治理功能**KubeSphere 为用户提供多样化的流量管理功能,包括金丝雀发布、蓝绿部署、流量镜像、和熔断机制等。
- **可视化界面**KubeSphere 提供交互式 Web 控制台,让用户可以直观地查看微服务之间相互通信的情况,支持链路追踪、智能路由等完善的微服务治理功能,帮助用户快速监控应用,定位问题并提高系统性能。
KubeSphere aims to make service-to-service calls within the microservices architecture reliable and fast. For more information, see Project Administration and Usage.
KubeSphere 旨在为服务间的通信提供一个可靠、迅速的微服务架构。有关更多信息,请参见[灰度发布](../../project-user-guide/grayscale-release/overview/)。
### Vibrant Open Source Community
### 活跃的开源社区
As an open-source project, KubeSphere represents more than just a container platform for app deployment and distribution. We believe that a true open-source model focuses more on sharing, discussions and problem solving with everyone involved. Together with partners, ambassadors and contributors, and other community members, we file issues, submit pull requests, participate in meetups, and exchange ideas of innovation.
KubeSphere 作为一个开源项目不仅仅是一个用于应用部署与分发的容器平台。KubeSphere 团队认为真正的开源模式更专注于让所有人进行开放地分享与讨论并相互帮助解决问题。KubeSphere 团队携手合作伙伴、大使和贡献者,以及其他的社区成员,共同打造一个开源开放的社区,大家可以在其中提出问题、提交 PR、参与见面会并交换创新意见等。
At KubeSphere, we have the capabilities and technical know-how to help you share the benefits that the open-source model can offer. More importantly, we have community members from around the world who make everything here possible.
KubeSphere 社区具备充分的能力和技术知识,让大家能共享开源模式所带来的红利。更重要的是,这里也是来自世界各地的开源爱好者们的共同家园,正是由于他们的贡献 KubeSphere 才能取得今天的成就。
**Partners**. KubeSphere partners play a critical role in KubeSphere's go-to-market strategy. They can be app developers, technology companies, cloud providers or go-to-market partners, all of whom drive the community ahead in their respective aspects.
**合作伙伴**KubeSphere 合作伙伴对 KubeSphere 的 Go-to-Market 策略至关重要,合作伙伴可以是开发者、技术公司、云厂商或 Go-to-Market 合作伙伴,他们在各自的领域都推动着社区的发展。
**Ambassadors**. As community representatives, ambassadors promote KubeSphere in a variety of ways (e.g. activities, blogs and user cases) so that more people can join us.
**大使**:作为 KubeSphere 社区的代表,大使负责在多个方面(活动、博客和用户案例等)帮助推动 KubeSphere 的发展,让更多的人参与社区。
**Contributors**. KubeSphere contributors help the whole community by contributing to code or documentation. You don't need to be an expert to make a difference; even a minor code fix or language improvement helps.
**贡献者**KubeSphere 贡献者通过贡献代码或文档等对整个社区进行贡献。就算您不是该领域的专家,无论是细微的代码修改或是语言改进,您的贡献也会帮助到整个社区。
For more information, see [Partner Program](https://kubesphere.io/partner/) and [Community Governance](https://kubesphere.io/contribution/).
有关更多信息,请参见[合作伙伴项目](https://kubesphere.io/partner/)和[社区治理](https://kubesphere.io/contribution/)。


@ -22,7 +22,7 @@ KubeSphere 为用户屏蔽了基础设施底层复杂的技术细节,帮助企
## 支持在任意平台运行 KubeSphere
作为一个灵活的轻量级容器 PaaS 平台KubeSphere 对不同云生态系统的支持非常友好,因为它没有对原生 Kubernetes 本身有任何的侵入 (Hack)。换句话说KubeSphere 可以**部署并运行在任何基础架构以及所有版本兼容的 Kubernetes 集群**之上,包括虚拟机、物理机、数据中心、公有云和混合云等。
作为一个灵活的轻量级容器 PaaS 平台KubeSphere 对不同云生态系统的支持非常友好,因为它对原生 Kubernetes 本身没有任何的侵入 (Hack)。换句话说KubeSphere 可以**部署并运行在任何基础架构以及所有版本兼容的 Kubernetes 集群**之上,包括虚拟机、物理机、数据中心、公有云和混合云等。
您可以选择在公有云和托管 Kubernetes 集群例如阿里云、AWS、青云QingCloud、腾讯云、华为云等上安装 KubeSphere**还可以导入和纳管已有的 Kubernetes 集群**。


@ -66,7 +66,7 @@ ip-10-0-8-148.cn-north-1.compute.internal Ready <none> 78m v1.18.8-eks-
```bash
TOKEN=$(kubectl -n kubesphere-system get secret $(kubectl -n kubesphere-system get sa kubesphere -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d)
kubectl config set-credentials kubesphere --token=${TOKEN}
kubectl config set-credentials --current --user=kubesphere
kubectl config set-context --current --user=kubesphere
```
检查新的 kubeconfig。
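A quick way to confirm the updated context is to run a couple of read-only calls with it (a sketch; it assumes the kubeconfig edited above is the one currently in use):

```bash
# Show the context, cluster, and user that are now in effect.
kubectl config view --minify
# If the token works, this should list the member cluster's nodes.
kubectl get nodes
```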


@ -8,7 +8,7 @@ weight: 6400
## 什么是 KubeSphere 日志系统
KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的、易于使用的日志系统。它涵盖了不同层级的日志包括租户、基础设施资源和应用。用户可以从项目、工作负载、Pod 和关键字等不同维度对日志进行搜索。与 Kibana 相比KubeSphere 基于租户的日志系统中,每个租户只能查看自己的日志,从而可以在租户之间提供更好的隔离性和安全性。除了 KubeSphere 自身的日志系统,容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。
KubeSphere 为日志收集、查询和管理提供了一个强大的、全面的、易于使用的日志系统。它涵盖了不同层级的日志包括租户、基础设施资源和应用。用户可以从项目、工作负载、Pod 和关键字等不同维度对日志进行搜索。与 Kibana 相比,在 KubeSphere 基于租户的日志系统中,每个租户只能查看自己的日志,从而可以在租户之间提供更好的隔离性和安全性。除了 KubeSphere 自身的日志系统,容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。
有关更多信息,请参见[日志查询](../../toolbox/log-query/)。


@ -0,0 +1,90 @@
---
title: "落盘日志收集"
keywords: 'KubeSphere, Kubernetes, project, disk, log, collection'
description: '落盘日志收集'
linkTitle: "落盘日志收集"
weight: 13600
---
KubeSphere supports multiple log collection methods so that Ops teams can collect, manage and analyze logs in a unified and flexible way.
This tutorial demonstrates how to collect disk logs for an example app.
## Prerequisites
You need to create a workspace, a project and an account (`project-admin`). The account must be invited to the project with the role of `admin` at the project level. For more information, see [Create Workspaces, Projects, Accounts and Roles](../../quick-start/create-workspace-and-project).
## Enable Disk Log Collection
1. Log in to the web console of KubeSphere as `project-admin` and go to your project.
2. From the left navigation bar, select **Advanced Settings** in **Project Settings**. Under **Disk Log Collection**, enable the feature through the toggle switch.
![enable-disk-log-collection](/images/docs/project-administration/disk-log-collection/enable-disk-log-collection.png)
## Create a Deployment
1. From the left navigation bar, select **Workloads** in **Application Workloads**. Under the **Deployments** tab, click **Create**.
2. In the dialog that appears, set a name for the Deployment (e.g. `demo-deployment`) and click **Next**.
3. Under **Container Image**, click **Add Container Image**.
4. Enter `alpine` in the search bar to use the image (tag: `latest`) as an example.
![alpine-image](/images/docs/project-administration/disk-log-collection/alpine-image.png)
5. Scroll down to **Start Command** and check it. Input the following values for **Run Command** and **Parameters** respectively, click **√**, and then click **Next**.
**Run Command**
```bash
/bin/sh
```
**Parameters**
```bash
-c,if [ ! -d /data/log ];then mkdir -p /data/log;fi; while true; do date >> /data/log/app-test.log; sleep 30;done
```
{{< notice note >}}
The command and parameters above mean that the date information will be exported to `app-test.log` in `/data/log` every 30 seconds.
{{</ notice >}}
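For readability, the condensed parameter string above is equivalent to the following shell script run by the container (a sketch of the same loop, written out with comments):

```bash
# Create the log directory once if it does not exist yet,
# then append the current date to the log file every 30 seconds.
if [ ! -d /data/log ]; then
  mkdir -p /data/log
fi
while true; do
  date >> /data/log/app-test.log
  sleep 30
done
```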
![run-command](/images/docs/project-administration/disk-log-collection/run-command.png)
6. On the **Mount Volumes** tab, enable **Disk Log Collection** and click **Add Volume**.
![mount-volumes](/images/docs/project-administration/disk-log-collection/mount-volumes.png)
7. On the **Temporary Volume** tab, input a name for the volume (e.g. `demo-disk-log-collection`) and set the access mode and path. Refer to the image below as an example.
![volume-example](/images/docs/project-administration/disk-log-collection/volume-example.png)
Click **√**, and then click **Next** to continue.
8. Click **Create** in **Advanced Settings** to finish the process.
{{< notice note >}}
For more information, see [Deployments](../../project-user-guide/application-workloads/deployments/).
{{</ notice >}}
## View Logs
1. Under the **Deployments** tab, click the Deployment just created to go to its detail page.
2. In **Resource Status**, click the arrow on the right to view container details, and then click the log icon of `logsidecar-container` (filebeat container) to inspect disk logs.
![container-log](/images/docs/project-administration/disk-log-collection/container-log.png)
![inspect-logs](/images/docs/project-administration/disk-log-collection/inspect-logs.png)
3. Alternatively, you can also use the **Log Search** function from **Toolbox** in the bottom right corner to view stdout logs. For example, use the Pod name of the Deployment for a fuzzy query:
![fuzzy-match](/images/docs/project-administration/disk-log-collection/fuzzy-match.png)
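If you prefer the command line, the same sidecar logs can be checked with kubectl (a sketch; `demo-deployment` is the example name used in this tutorial and the actual Pod name will differ in your project):

```bash
# Find the Pod created by the example Deployment, then read the log-collecting sidecar.
kubectl -n <your-project> get pods | grep demo-deployment
kubectl -n <your-project> logs <pod-name> -c logsidecar-container --tail=10
# Each line should be a timestamp appended to /data/log/app-test.log every 30 seconds.
```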


@ -1,37 +1,37 @@
---
title: "定时任务 (CronJob)"
keywords: "KubeSphere, Kubernetes, jobs, cronjobs"
description: "创建 KubeSphere 定时任务 (CronJob)"
linkTitle: "定时任务 (CronJob)"
title: "定时任务"
keywords: "KubeSphere, Kubernetes, 任务, 定时任务"
description: "创建 KubeSphere 定时任务"
linkTitle: "定时任务"
weight: 10260
---
CronJobs 对于创建定期和重复执行的任务非常有用例如运行备份或发送电子邮件。CronJobs 还可以在特定时间或间隔执行单个任务,例如在集群可能处于空闲状态时安排任务。
定时任务 (CronJob) 对于创建周期性和重复性任务非常有用,例如运行备份或发送电子邮件。定时任务还可以在特定时间或间隔执行单个任务,例如在集群可能处于空闲状态时执行任务。
有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/).
有关更多信息,请参见 [Kubernetes 官方文档](https://kubernetes.io/zh/docs/concepts/workloads/controllers/cron-jobs/)
## 先决条件
## 准备工作
您需要创建一个企业空间、一个项目以及一个帐户 (`project-regular`)。必须邀请该帐户`operator` 身份加入该项目。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project)。
您需要创建一个企业空间、一个项目以及一个帐户 (`project-regular`)。必须邀请该帐户至该项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project)。
## 创建 CronJob
## 创建定时任务
### 步骤 1: 打开控制台
### 步骤 1:打开仪表板
`project-regular` 身份登录控制台。 转到项目的**应用负载**,选择**任务**,然后在**定时任务**选项卡下单击**创建**。
`project-regular` 身份登录控制台。转到项目的**任务**页面,然后在**定时任务**选项卡下点击**创建**。
![cronjob-list](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/cronjob-list.png)
![定时任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list.png)
### 步骤 2: 输入基本信息
### 步骤 2输入基本信息
输入基本信息。 您可以参考以下图像的每个字段。 完成后,单击**下一步**。
您可以参考下图在每个字段中输入基本信息。完成操作后,点击**下一步**。
![cronjob-create-basic-info](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/cronjob-create-basic-info.png)
![基本信息](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-create-basic-info.png)
- **名称**: CronJob 的名称,也是唯一的标识符。
- **别名**: CronJob 的别名, 帮助您更好的区分资源
- **定时计划**: 按照给定的时间计划运行任务。语法参照 [CRON](https://en.wikipedia.org/wiki/Cron) 。KubeSphere 中提供了一些预置 CRON 语句,以简化输入。该字段由`.spec.schedule`指定。对于此 CronJob输入`* / 1 * * * *`,这意味着它每分钟运行一次。
- **名称**:定时任务的名称,也是唯一标识符。
- **别名**:定时任务的别名,使资源易于识别
- **定时计划**:按照给定的时间计划运行任务。语法参照 [CRON](https://zh.wikipedia.org/wiki/Cron)。KubeSphere 中提供了一些预置 CRON 语句以简化输入。该字段由 `.spec.schedule` 指定。对于此定时任务,输入 `*/1 * * * *`,这意味着它每分钟运行一次。
| 类型 | CRON |
| ----------- | ----------- |
@ -42,75 +42,75 @@ CronJobs 对于创建定期和重复执行的任务非常有用,例如运行
- **高级设置 (执行参数)**:
- **启动 Job 的期限(秒)**. 由清单文件中的 `.spec.startingDeadlineSeconds` 指定,此可选字段表示如果由于任何原因错过计划时间,ConJob启动所需的最大秒数。错过执行的 CronJob 将被视为失败。 如果未指定此字段,则 CronJob 没有截止日期
- **保留完成 Job 数**. 由清单文件中的 `.spec.successfulJobsHistoryLimit` 指定,此字段表示要保留的成功 CronJob 执行次数。 如若未指定该字段,则默认值为 3。
- **保留失败 Job 数**. 由清单文件中的 `.spec.failedJobsHistoryLimit` 指定,此字段表示要保留的 CronJob 执行失败的次数。 如若未指定该字段,则默认值为 1。
- **并发策略**. 由 `.spec.concurrencyPolicy` 指定,它表示如何处理 Job 的并发执行。 有效值为:
- **Allow** (默认值): 允许CronJobs并发运行。
- **Forbid**: 禁止并发运行,如果前一个还没有完成,则直接跳过下一个
- **Replace**: 取消当前正在运行的 Job,用一个新的来替换。
- **启动 Job 的期限(秒)**由清单文件中的 `.spec.startingDeadlineSeconds` 指定,此可选字段表示如果由于任何原因错过计划时间,定时任务启动所需的最大秒数。错过执行的定时任务将被计为失败。如果未指定此字段,则此定时任务没有启动期限
- **保留完成 Job 数**由清单文件中的 `.spec.successfulJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行成功的次数,用于区分显式零和未指定这两种情况。默认值为 3。
- **保留失败 Job 数**:由清单文件中的 `.spec.failedJobsHistoryLimit` 指定,此字段表示要保留的定时任务执行失败的次数,用于区分显式零和未指定这两种情况。默认值为 1。
- **并发策略**:由 `.spec.concurrencyPolicy` 指定,它表示如何处理任务的并发执行。有效值为:
- **Allow** (默认值):允许定时任务并发运行。
- **Forbid**:禁止并发运行,如果前一个运行还没有完成,则跳过下一个运行
- **Replace**:取消当前正在运行的任务,用一个新的来替换。
{{< notice note >}}
您可以在右上角启用**编辑模式**查看此 CronJob 的 YAML 格式配置文件。
您可以在右上角开启**编辑模式**,查看此定时任务的 YAML 格式清单文件。
{{</ notice >}}
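Taken together, the schedule and the advanced settings described above map onto a CronJob manifest along these lines (a minimal sketch for illustration; it borrows the busybox command used later in this tutorial and is not the exact YAML the console generates):

```bash
cat <<'EOF' | kubectl apply -n <your-project> -f -
apiVersion: batch/v1beta1          # API version for Kubernetes releases current at the time of writing; newer clusters use batch/v1
kind: CronJob
metadata:
  name: demo-cronjob
spec:
  schedule: "*/1 * * * *"          # 定时计划: run once every minute
  startingDeadlineSeconds: 30      # 启动 Job 的期限(秒)
  successfulJobsHistoryLimit: 3    # 保留完成 Job 数
  failedJobsHistoryLimit: 1        # 保留失败 Job 数
  concurrencyPolicy: Allow         # 并发策略: Allow / Forbid / Replace
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
            - name: busybox
              image: busybox
              args: ["/bin/sh", "-c", "date; echo \"KubeSphere!\""]
EOF
```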
### 步骤 3: ConJob 设置 (可选)
### 步骤 3:定时任务设置(可选)
请参阅 [任务Jobs](../jobs/#step-3-job-settings-optional)。
请参考[任务](../jobs/#步骤-3任务设置可选)。
### 步骤 4: 设置镜像
### 步骤 4设置镜像
1. 在**容器镜像**里单击 **添加容器镜像** ,在搜索栏中输入 `busybox`
1. 点击**容器镜像**下的**添加容器镜像**,在搜索栏中输入 `busybox`
![input-busybox](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/input-busybox.png)
![输入 busybox](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/input-busybox.png)
2. 向下滚动到**启动命令** 然后在**参数**框中输入 `/bin/sh,-c,date; echo "KubeSphere!"`
2. 向下滚动到**启动命令**然后在**参数**框中输入 `/bin/sh,-c,date; echo "KubeSphere!"`
![start-command](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/start-command.png)
![启动命令](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/start-command.png)
3. 单击 **√** 完成镜像设置,然后单击**下一步**继续。
3. 点击 **√** 完成镜像设置,然后点击**下一步**继续。
![finish-image](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/finish-image.png)
![完成镜像设置](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/finish-image.png)
{{< notice note >}}
- 此示例 CronJob 打印字母 `KubeSphere`。 有关设置镜像的更多信息请参阅[容器镜像设置](../container-image-settings/)。
- 有关**重新启动策略**的更多信息,请参见[任务Job](../jobs/#step-4-set-image)。
- 您可以跳过本教程的**挂载存储**和**高级设置**。 有关更多信息,请参见在 Deployments 中[挂载存储](../deployments/#step-4-mount-volumes)和[配置高级设置](../deployments/#step-5-configure-advanced-settings)。
- 此示例定时任务输出 `KubeSphere`。有关设置镜像的更多信息,请参见[容器镜像设置](../container-image-settings/)。
- 有关**重启策略**的更多信息,请参见[任务](../jobs/#步骤-4设置镜像)。
- 您可以跳过本教程的**挂载存储**和**高级设置**。有关更多信息,请参见部署一文中的[挂载存储卷](../deployments/#步骤-4挂载存储卷)和[配置高级设置](../deployments/#步骤-5配置高级设置)。
{{</ notice >}}
### 步骤 5: 检查结果
### 步骤 5检查结果
1. 在**高级设置**的最后一步中,单击**创建**以完成。 如果创建成功,新项目将添加到 CronJob 列表中。 此外,您还可以在**任务Jobs**标签下找到作业任务。
1. 在最后一步**高级设置**中,点击**创建**完成操作。如果创建成功,定时任务列表中将添加一个新条目。此外,您还可以在**任务**选项卡下查看任务。
![cronjob-list-new](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/cronjob-list-new.png)
![定时任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-list-new.png)
![job-list](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/job-list.png)
![任务列表](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-list.png)
2. 在 **ConJobs** 选项卡下,单击此 CronJob然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。 由于将字段`successJobsHistoryLimit`设置为 3因此只记录了成功执行 3 次的任务
2. 在**定时任务**选项卡下,点击此定时任务,然后转到**任务记录**选项卡,您可以在其中查看每个执行记录的信息。由于**保留完成 Job 数**字段设置为 3因此这里显示定时任务成功执行 3 次
![execution-record](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/execution-record.png)
![执行记录](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/execution-record.png)
3. 单击其中任何一个,您将转到作业详细信息页面。
3. 点击任意记录,您将转到该任务的详情页面。
![job-detail-page](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/job-detail-page.png)
![任务详情页面](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/job-detail-page.png)
4. 在**资源状态**中,您可以检查 Pod 状态。击右侧的箭头,可以检查容器日志,如下所示,该日志显示预期输出。
4. 在**资源状态**中,您可以检查 Pod 状态。击右侧的箭头,可以检查容器日志,如下所示,该日志显示预期输出。
![container-log-1](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/container-log-1.png)
![容器日志-1](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-1.png)
![container-log-2](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/container-log-2.png)
![容器日志-2](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/container-log-2.png)
## CronJob 操作
## 定时任务操作
CronJob 详细信息页面上,您可以在创建 CronJob 之后对其进行管理。
定时任务详情页面上,您可以在创建定时任务之后对其进行管理。
- **编辑信息**: 编辑除了 CronJob `名称` 以外的基本信息
- **暂停/启动**: 暂停或启动 Cronjob。 暂停 CronJob 将告诉控制器暂停后续任务,这不适用于已经开始执行的任务
- **编辑 YAML**: 以 YAML 格式编辑 CronJob 的配置。
- **删除**: 删除 CronJob然后返回到 CronJob 列表页面。
- **编辑信息**:编辑基本信息,但无法编辑该定时任务的`名称`
- **暂停/启动**:暂停或启动该定时任务。暂停定时任务将告知控制器暂停后续执行任务,但已经启动的执行不受影响
- **编辑配置文件**:编辑该定时任务的 YAML 文件配置。
- **删除**:删除该定时任务,然后返回定时任务列表页面。
![cronjob-action](/images/docs/project-user-guide-zh/application-workloads-zh/cronjobs-zh/cronjob-action.png)
![定时任务操作](/images/docs/zh-cn/project-user-guide/application-workloads/cronjobs/cronjob-action.png)
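The 暂停/启动 action maps to the CronJob's `spec.suspend` field, so the same effect can be achieved with kubectl (a sketch; replace the project and CronJob names with your own):

```bash
# Suspend: the controller stops creating new Jobs; runs that have already started keep going.
kubectl -n <your-project> patch cronjob <cronjob-name> -p '{"spec":{"suspend":true}}'
# Resume scheduling.
kubectl -n <your-project> patch cronjob <cronjob-name> -p '{"spec":{"suspend":false}}'
```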


@ -1,5 +1,5 @@
---
linkTitle: "Examples"
linkTitle: "示例"
weight: 10811
_build:


@ -1,98 +1,97 @@
---
title: "Monitor MySQL"
keywords: 'monitoring, prometheus, prometheus operator'
description: 'Monitor MySQL'
linkTitle: "Monitor MySQL"
title: "监控 MySQL"
keywords: '监控, Prometheus, Prometheus operator'
description: '监控 MySQL'
linkTitle: "监控 MySQL"
weight: 10812
---
From the [Introduction](../../introduction#indirect-exposing) section, you know it is not feasible to instrument MySQL with Prometheus metrics directly. To expose MySQL metrics in Prometheus format, you need to deploy MySQL exporter instead.
通过[介绍](../../../../project-user-guide/custom-application-monitoring/introduction/#间接暴露)一文,您了解到无法直接将 Prometheus 指标接入 MySQL。要以 Prometheus 格式暴露 MySQL 指标,您需要部署 MySQL 导出器 (Exporter)。
This tutorial walks you through an example of how to monitor MySQL metrics and visualize them.
本教程演示如何监控 MySQL 指标并将这些指标可视化。
## Prerequisites
## 准备工作
- Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). MySQL and MySQL exporter will be deployed from the App Store.
- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspace, Project, Account and Role](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-operator` and work in the project `demo` in the workspace `demo-workspace`.
- 请确保已[启用 OpenPitrix 系统](../../../../pluggable-components/app-store/)。MySQL 和 MySQL 导出器将通过应用商店来部署。
- 您需要创建一个企业空间、一个项目和一个帐户。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../../quick-start/create-workspace-and-project/)。该帐户需要是平台普通用户,将其邀请至项目中并赋予 `operator` 角色作为项目操作员。在本教程中,您以 `project-operator` 身份登录控制台,在 `demo-workspace` 企业空间中的 `demo` 项目下进行操作。
## Hands-on Lab
## 动手实验
### Step 1: Deploy MySQL
### 步骤 1部署 MySQL
To begin with, you [deploy MySQL from the App Store](../../../../application-store/built-in-apps/mysql-app/) and set the root password to `testing`. Please make sure you are landing on the **Overview** page of the project `demo`.
首先,请[从应用商店部署 MySQL](../../../../application-store/built-in-apps/mysql-app/),将 Root 密码设置为 `testing`
1. Go to **App Store**.
1. 转到 `demo` 项目,点击左上角的**应用商店**。
![go-to-app-store](/images/docs/project-user-guide/custom-application-monitoring/go-to-app-store.jpg)
![转到应用商店](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/go-to-app-store.PNG)
2. Find **MySQL** and click **Deploy**.
2. 找到 **MySQL**,点击**部署**。
![find-mysql](/images/docs/project-user-guide/custom-application-monitoring/find-mysql.jpg)
![找到 MySQL](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/find-mysql.PNG)
![click-deploy](/images/docs/project-user-guide/custom-application-monitoring/click-deploy.jpg)
![点击部署](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/click-deploy.PNG)
3. Make sure MySQL is deployed in `demo` and click **Next**.
3. 请确保将 MySQL 部署在 `demo` 项目,点击**下一步**。
![click-next](/images/docs/project-user-guide/custom-application-monitoring/click-next.jpg)
![点击下一步](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/click-next.PNG)
4. Uncomment the `mysqlRootPassword` field and click **Deploy**.
4. 取消 `mysqlRootPassword` 字段的注释,点击**部署**。
![uncomment-mysqlRootPassword](/images/docs/project-user-guide/custom-application-monitoring/uncomment-mysqlRootPassword.jpg)
![取消 mysqlRootPassword 注解](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/uncommet-mysqlrootpassword.PNG)
5. Wait until MySQL is up and running.
5. 稍等片刻待 MySQL 启动并运行。
![check-if-mysql-is-running](/images/docs/project-user-guide/custom-application-monitoring/check-if-mysql-is-running.jpg)
![Mysql 运行中](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/check-if-mysql-running.PNG)
### Step 2: Deploy MySQL exporter
### 步骤 2部署 MySQL 导出器
You need to deploy MySQL exporter in `demo` on the same cluster. MySQL exporter is responsible for querying MySQL status and reports the data in Prometheus format.
您需要在同一个集群上的 `demo` 项目中部署 MySQL 导出器。MySQL 导出器负责查询 MySQL 状态并报告 Prometheus 格式的数据。
1. Go to **App Store** and find **MySQL exporter**.
1. 转到**应用商店**,找到 **MySQL exporter**
![find-mysql-exporter](/images/docs/project-user-guide/custom-application-monitoring/find-mysql-exporter.jpg)
![找到 Mysql Exporter](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/find-mysql-exporter.PNG)
![exporter-click-deploy](/images/docs/project-user-guide/custom-application-monitoring/exporter-click-deploy.jpg)
![Exporter 点击部署](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/exporter-click-deploy.PNG)
2. Deploy MySQL exporter in `demo` again.
2. 部署 MySQL 导出器至 `demo` 项目。
![exporter-click-next](/images/docs/project-user-guide/custom-application-monitoring/exporter-click-next.jpg)
![Exporter 点击下一步](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/exporter-click-next.PNG)
3. Make sure `serviceMonitor.enabled` is set to `true`. The built-in MySQL exporter sets it to `true` by default, so you don't have to manually modify `serviceMonitor.enabled`.
3. 请确保将 `serviceMonitor.enabled` 设为 `true`。内置 MySQL 导出器默认将其设置为 `true`,故您无需手动修改 `serviceMonitor.enabled`
![set-servicemonitor-to-true](/images/docs/project-user-guide/custom-application-monitoring/set-servicemonitor-to-true.jpg)
![设置 servicemonitor 为 true](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/set-servicemonitor-to-true.PNG)
{{< notice warning >}}
Don't forget to enable the ServiceMonitor CRD if you are using external exporter helm charts. Those charts usually disable ServiceMonitor by default and require manual modification.
如果您使用外部导出器的 Helm Chart请记得启用 ServiceMonitor CRD。此类 Chart 通常默认禁用 ServiceMonitor需要手动修改。
{{</ notice >}}
4. Modify MySQL connection parameters. MySQL exporter needs to connect to the target MySQL. In this tutorial, MySQL is installed with the service name `mysql-a8xgvx`. Set `mysql.host` to `mysql-a8xgvx`, `mysql.pass` to `testing`, and `user` to `root` as below. Note that your MySQL service may be created with **a different name**.
4. 修改 MySQL 连接参数。MySQL 导出器需要连接到目标 MySQL。在本教程中MySQL 以服务名 `mysql-8jkp3d` 进行安装。请将 `mysql.host` 设置为 `mysql-8jkp3d`,将 `mysql.pass` 设置为 `testing`,将 `user` 设置为 `root`,如下所示。请注意,您的 MySQL 服务创建后可能**名称不同**。
![mysql-conn-params](/images/docs/project-user-guide/custom-application-monitoring/mysql-conn-params.jpg)
![Mysql 连接参数](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/mysql-conn-params.PNG)
5. Click **Deploy** and wait until MySQL exporter is up and running.
5. 点击**部署**,稍等片刻待 MySQL 导出器启动并运行。
![exporter-click-deploy-2](/images/docs/project-user-guide/custom-application-monitoring/exporter-click-deploy-2.jpg)
![Exporter 点击部署-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/exporter-click-deploy-2.PNG)
![exporter-is-running](/images/docs/project-user-guide/custom-application-monitoring/exporter-is-running.jpg)
![Exporter 运行中](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/exporter-is-running.PNG)
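For reference, the settings made in the steps above boil down to a handful of Helm values (a sketch; `mysql-8jkp3d` is this tutorial's generated Service name, and the key layout assumes the built-in MySQL exporter chart described above):

```bash
cat > exporter-values.yaml <<'EOF'
serviceMonitor:
  enabled: true        # must stay true so the KubeSphere monitoring engine can discover the exporter
mysql:
  host: mysql-8jkp3d   # Service name of the MySQL instance deployed in step 1
  user: root
  pass: testing        # the root password set when deploying MySQL
EOF
```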
### Step 3: Create Dashboard
### 步骤 3创建监控面板
After about two minutes, you can create a monitoring dashboard for MySQL and visualize metrics in real time.
大约两分钟后,您可以为 MySQL 创建监控面板,并将指标实时可视化。
1. Navigate to **Custom Monitoring** under **Monitoring & Alerting** and click **Create**.
1. 转到**监控告警**下的**自定义监控**,点击**创建**。
![navigate-to-custom-monitoring](/images/docs/project-user-guide/custom-application-monitoring/navigate-to-custom-monitoring.jpg)
![转到自定义监控](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/navigate-to-custom-monitoring.PNG)
2. In the dialogue that appears, name the dashboard as `mysql-overview` and choose **MySQL template**. Click **Create** to continue.
2. 在弹出对话框中,将监控面板命名为 `mysql-overview` 并选择 **MySQL 模板**。点击**创建**继续。
![create-mysql-dashboard](/images/docs/project-user-guide/custom-application-monitoring/create-mysql-dashboard.jpg)
![创建 Mysql 仪表板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/create-mysql-dashboard.PNG)
3. Save the template by clicking **Save Template** in the top right corner. A newly-created dashboard displays in the dashboard list as below.
3. 点击右上角的**保存模板**保存该模板。新创建的监控面板会在监控面板列表中显示,如下所示。
![save-mysql-template](/images/docs/project-user-guide/custom-application-monitoring/save-mysql-template.jpg)
![保存 Mysql 模板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/save-mysql-template.PNG)
![monitor-mysql-done](/images/docs/project-user-guide/custom-application-monitoring/monitor-mysql-done.jpg)
![监控 Mysql 配置完成](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-mysql/monitor-mysql-done.PNG)
{{< notice tip >}}
For more information about dashboard strings, see [Visualization](../../../../project-user-guide/custom-application-monitoring/visualization/overview/).
有关监控面板上各属性的更多信息,请参见[可视化](../../../../project-user-guide/custom-application-monitoring/visualization/overview/)。
{{</ notice >}}


@ -1,119 +1,118 @@
---
title: "Monitor Sample Web"
keywords: 'monitoring, prometheus, prometheus operator'
description: 'Monitor Sample Web'
linkTitle: "Monitor Sample Web"
title: "监控示例 Web 应用程序"
keywords: '监控, prometheus, prometheus operator'
description: '监控示例 Web 应用程序'
linkTitle: "监控示例 Web 应用程序"
weight: 10813
---
This section walks you through monitoring a sample web application. The application is instrumented with Prometheus Go client in its code. Therefore, it can expose metrics directly without the help of exporters.
本教程演示如何监控示例 Web 应用程序。该应用程序在代码中已接入 Prometheus Go 客户端,因此可以直接暴露指标,无需使用导出器 (Exporter)。
## Prerequisites
## 准备工作
- Please make sure you [enable the OpenPitrix system](../../../../pluggable-components/app-store/).
- You need to create a workspace, a project, and a user account for this tutorial. For more information, see [Create Workspace, Project, Account and Role](../../../../quick-start/create-workspace-and-project/). The account needs to be a platform regular user and to be invited as the workspace self provisioner with the `self-provisioner` role. Namely, create an account `workspace-self-provisioner` of the `self-provisioner` role, and use this account to create a project (e.g. `test`). In this tutorial, you log in as `workspace-self-provisioner` and work in the project `test` in the workspace `demo-workspace`.
- 请确保[已启用 OpenPitrix 系统](../../../../pluggable-components/app-store/)。
- 您需要创建一个企业空间、一个项目和一个帐户。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../../quick-start/create-workspace-and-project/)。该帐户需要是平台普通用户,邀请至该企业空间中并赋予 `self-provisioner` 角色。故请创建一个 `workspace-self-provisioner` 帐户,赋予 `self-provisioner` 角色,并使用该帐户创建一个项目(例如 `test`)。在本教程中,您以 `workspace-self-provisioner` 身份登录控制台,并在 `demo-workspace` 企业空间的 `test` 项目中进行操作。
- Knowledge of Helm Chart and [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/).
- 了解 Helm Chart 和 [PromQL](https://prometheus.io/docs/prometheus/latest/querying/examples/)。
## Hands-on Lab
## 动手实验
### Step 1: Prepare Sample Web Application Image
### 步骤 1准备示例 Web 应用程序的镜像
First, prepare the sample web application image. The sample web application exposes a user-defined metric called `myapp_processed_ops_total`. It is a counter-type metric that counts the number of operations that have been processed so far. The counter increases automatically by one every 2 seconds.
示例 Web 应用程序暴露一个名为 `myapp_processed_ops_total` 的用户定义指标。这是一个计数器型指标,计算已处理操作的数量。计数器每 2 秒自动增加 1。
This sample application exposes application-specific metrics via the endpoint `http://localhost:2112/metrics`.
该示例应用程序通过 Endpoint `http://localhost:2112/metrics` 暴露应用具体指标。
In this tutorial, you use the ready-made image `kubespheredev/promethues-example-app`. The source code can be found in [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app). You can also follow [Instrument A Go Application For Prometheus](https://prometheus.io/docs/guides/go-application/) in the official documentation of Prometheus.
在本教程中,您可以使用现成的镜像 `kubespheredev/promethues-example-app`。源代码请见 [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app)。您也可以按照 Prometheus 官方文档 [Instrument A Go Application For Prometheus](https://prometheus.io/docs/guides/go-application/) 进行操作。
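If you want to see the metric before deploying anything to the cluster, the image can be run locally (a sketch; it assumes Docker is available and that the image serves metrics on port 2112 as described above):

```bash
docker run -d --name prometheus-example-app -p 2112:2112 kubespheredev/promethues-example-app
sleep 5
curl -s http://localhost:2112/metrics | grep myapp_processed_ops_total
# Expected shape of the output; the value grows by one roughly every 2 seconds:
# myapp_processed_ops_total 7
```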
### Step 2: Pack the Application into a Helm Chart
### 步骤 2将应用程序打包成 Helm Chart
Pack the Deployment, Service, and ServiceMonitor YAML templates into a Helm chart for reuse. In the Deployment and Service templates, you define the sample web container and the port for the metrics endpoint. ServiceMonitor is a custom resource defined and used by Prometheus Operator. It connects your application and the KubeSphere monitoring engine (Prometheus) so that the engine knows where and how to scrape metrics. In future releases, KubeSphere will provide a graphical user interface for easy operation.
将部署、服务和 ServiceMonitor 的 YAML 模板打包成一个 Helm Chart 以便复用。在部署和服务模板中,您需要定义示例 Web 容器以及指标 Endpoint 的端口。ServiceMonitor 是由 Prometheus Operator 定义和使用的自定义资源,它连接您的应用程序和 KubeSphere 监控引擎 (Prometheus)以便监控引擎知道抓取指标的位置和方式。KubeSphere 在未来发布版本中将提供易于操作的图形用户界面。
Find the source code in the folder `helm` in [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app). The Helm chart package is ready-made and named `prometheus-example-app-0.1.0.tgz`. Please download the .tgz file, as you will use it in the next step.
源代码请见 [kubesphere/prometheus-example-app](https://github.com/kubesphere/prometheus-example-app)`helm` 文件夹。Helm Chart 包已准备完成并命名为 `prometheus-example-app-0.1.0.tgz`。请下载该 .tgz 文件,用于下面的步骤。
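A ServiceMonitor of the kind packed into that chart looks roughly like this (an illustrative sketch; the names, labels, and port name are assumptions rather than the chart's exact contents):

```bash
cat <<'EOF' | kubectl apply -n test -f -
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: prometheus-example-app
spec:
  selector:
    matchLabels:
      app: prometheus-example-app   # must match the labels on the app's Service
  endpoints:
    - port: metrics                 # the named Service port that serves /metrics on 2112
      interval: 30s                 # how often Prometheus scrapes the endpoint
EOF
```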
### Step 3: Upload the Helm Chart
### 步骤 3上传 Helm Chart
1. Go to the workspace **Overview** page of `demo-workspace` and navigate to **App Templates**.
1. `demo-workspace` 企业空间的**概览**页面上转到**应用模板**。
![app-template-create](/images/docs/project-user-guide/custom-application-monitoring/app-template-create.jpg)
![创建应用模板](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/app-template-create.PNG)
2. Click **Create** and upload `prometheus-example-app-0.1.0.tgz` as images below.
2. 点击**创建**,上传 `prometheus-example-app-0.1.0.tgz` 作为镜像,如下所示。
![click-create-app-template](/images/docs/project-user-guide/custom-application-monitoring/click-create-app-template.jpg)
![click-create-app-template](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-create-app-template.PNG)
![click-upload-app-template](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template.jpg)
![click-upload-app-template](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template.PNG)
![click-upload-app-template-2](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-2.jpg)
![click-upload-app-template-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-2.PNG)
![click-upload-app-template-4](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-4.jpg)
![click-upload-app-template-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-4.PNG)
![click-upload-app-template-5](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-5.jpg)
![click-upload-app-template-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-5.PNG)
![click-upload-app-template-6](/images/docs/project-user-guide/custom-application-monitoring/click-upload-app-template-6.jpg)
![click-upload-app-template-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/click-upload-app-template-6.PNG)
### Step 4: Deploy Sample Web Application
### 步骤 4部署示例 Web 应用程序
You need to deploy the sample web application into `demo`. For demonstration purposes, you can simply run a test deployment.
您需要将示例 Web 应用程序部署至 `test` 项目,可以简单运行一个测试部署用于演示。
1. Click `prometheus-example-app`.
1. 点击 `prometheus-example-app`
![deploy-sample-web-1](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-1.jpg)
![部署示例 Web 应用-1](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-1.PNG)
2. Expand the menu and click **Test Deploy**.
2. 展开菜单,点击**测试部署**。
![deploy-sample-web-2](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-2.jpg)
![部署示例 Web 应用-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-2.PNG)
![deploy-sample-web-3](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-3.jpg)
![部署示例 Web 应用-3](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-3.PNG)
3. Make sure you deploy the sample web application in `test` and click **Next**.
3. 请确保将示例 Web 应用程序部署至 `test` 项目,点击**下一步**。
![deploy-sample-web-4](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-4.jpg)
![部署示例 Web 应用-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-4.PNG)
4. Make sure `serviceMonitor.enabled` is set to `true` and click **Deploy**.
4. 请确保将 `serviceMonitor.enabled` 设置为 `true`,点击**部署**。
![deploy-sample-web-5](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-5.jpg)
![部署示例 Web 应用-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-5.PNG)
![deploy-sample-web-6](/images/docs/project-user-guide/custom-application-monitoring/deploy-sample-web-6.jpg)
![部署示例 Web 应用-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/deploy-sample-web-6.PNG)
5. In **Workloads** of the project `test`, wait until the sample web application is up and running.
5. `test` 项目的**工作负载**下,稍等片刻待示例 Web 应用程序启动并运行。
![create-dashboard-1](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-1.jpg)
![创建仪表板-1](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-1.PNG)
### Step 5: Create Dashboard
### 步骤 5创建监控面板
This section guides you on how to create a dashboard from scratch. You will create a text chart showing the total number of processed operations and a line chart for displaying the operation rate.
该部分演示如何从零创建监控面板。您需要创建一个显示已处理操作总数的文本图表和一个显示操作率的折线图。
1. Navigate to **Custom Monitoring** and click **Create**.
1. 转到**自定义监控**,点击**创建**。
![create-dashboard-2](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-2.jpg)
![创建仪表板-2](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-2.PNG)
2. Set a name (e.g. `sample-web`) and click **Create**.
2. 设置名称(例如 `sample-web`),点击**创建**。
![create-dashboard-3](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-3.jpg)
![创建仪表板-3](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-3.PNG)
3. Enter a title in the top left corner (e.g. `Sample Web Overview`).
3. 在左上角输入标题(例如 `示例 Web 概览`)。
![create-dashboard-4](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-4.jpg)
![创建仪表板-4](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-4.PNG)
4. Click the **plus icon** on the left column to create a text chart.
4. 点击左列的**加号图标**,创建文本图表。
![create-dashboard-5](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-5.jpg)
![创建仪表板-5](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-5.PNG)
5. Type the PromQL expression `myapp_processed_ops_total` in the field **Monitoring Metrics** and give a chart name (e.g. `Operation Count`). Click **√** in the bottom right corner to continue.
5. 在**监控指标**字段输入 PromQL 表达式 `myapp_processed_ops_total`,并设置图表名称(例如 `操作数`)。点击右下角的 **√** 继续。
![create-dashboard-6](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-6.jpg)
![创建仪表板-6](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-6.PNG)
6. Click **Add Monitoring Item** to create a line chart.
6. 点击**添加监控项**,创建折线图。
![create-dashboard-7](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-7.jpg)
![创建仪表板-7](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-7.PNG)
![create-dashboard-8](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-8.jpg)
![创建仪表板-8](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-8.PNG)
7. Type the PromQL expression `irate(myapp_processed_ops_total[3m])` for **Monitoring Metrics** and name the chart `Operation Rate`. To improve the appearance, you can set **Metric Name** to `{{service}}`. It will name each line with the value of the metric label `service`. Next, set **Decimal Places** to `2` so that the result will be truncated to two decimal places.
7. 在**监控指标**中输入 PromQL 表达式 `irate(myapp_processed_ops_total[3m])` 并将图表命名为 `操作率`。要改进外观,可以将**图例名称**设置为 `{{service}}`。它会用图例标签 `service` 的值命名每一段折线。然后将**精确位**设置为 `2`,以便将结果保留两位小数。
![create-dashboard-9](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-9.jpg)
![创建仪表板-9](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-9.PNG)
8. Click **Save Template** to save it.
8. 点击**保存模板**进行保存。
![create-dashboard-10](/images/docs/project-user-guide/custom-application-monitoring/create-dashboard-10.jpg)
![创建仪表板-10](/images/docs/zh-cn/project-user-guide/custom-application-monitoring/examples/monitor-sample-web-app/create-dashboard-10.PNG)
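The two expressions used above can also be checked directly against the Prometheus HTTP API (a sketch; the in-cluster endpoint below is an assumption, and you may need a port-forward to reach it from outside the cluster):

```bash
PROM=http://prometheus-k8s.kubesphere-monitoring-system.svc:9090
# Raw counter: total operations processed so far.
curl -sG "$PROM/api/v1/query" --data-urlencode 'query=myapp_processed_ops_total'
# Per-second operation rate over the last 3 minutes, as used by the line chart.
curl -sG "$PROM/api/v1/query" --data-urlencode 'query=irate(myapp_processed_ops_total[3m])'
```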


@ -1,20 +1,20 @@
---
title: "Binary to Image: Publish an Artifact to Kubernetes"
title: "Binary to Image:发布制品到 Kubernetes"
keywords: "KubeSphere, Kubernetes, Docker, B2I, Binary-to-Image"
description: "How to publish artifacts to Kubernetes using Binary-to-Image"
linkTitle: "Binary to Image: Publish an Artifact to Kubernetes"
description: "如何使用 Binary-to-Image 发布制品到 Kubernetes"
linkTitle: "Binary to Image:发布制品到 Kubernetes"
weight: 10620
---
Binary-to-Image (B2I) is a toolkit and workflow for building reproducible container images from binary executables such as Jar, War, and binary packages. More specifically, you upload an artifact and specify a target repository such as Docker Hub or Harbor where you want to push the image. If everything runs successfully, your image will be pushed to the target repository and your application will be automatically deployed to Kubernetes if you create a Service in the workflow.
Binary-to-Image (B2I) 是一个工具箱和工作流,用于从二进制可执行文件(例如 Jar、War 和二进制包)构建可再现容器镜像。更确切地说,您可以上传一个制品并指定一个目标仓库,例如 Docker Hub 或者 Harbor用于推送镜像。如果一切运行成功会推送您的镜像至目标仓库并且如果您在工作流中创建服务 (Service),也会自动部署应用程序至 Kubernetes。
In a B2I workflow, you do not need to write any Dockerfile. This not only reduces learning costs but improves release efficiency, which allows users to focus more on business.
在 B2I 工作流中,您不需要编写 Dockerfile。这不仅能降低学习成本也能提升发布效率使用户更加专注于业务。
This tutorial demonstrates two different ways to build an image based on an artifact in a B2I workflow. Ultimately, the image will be released to Docker Hub.
本教程演示在 B2I 工作流中基于制品构建镜像的两种不同方式。最终,镜像会发布至 Docker Hub。
For demonstration and testing purposes, here are some example artifacts you can use to implement the B2I workflow:
以下是一些示例制品,用于演示和测试,您可以用来实现 B2I 工作流:
| Artifact Package | GitHub Repository |
| 制品包 | GitHub 仓库 |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
| [b2i-war-java8.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war) | [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) |
| [b2i-war-java11.war](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java11.war) | [springmvc5](https://github.com/kubesphere/s2i-java-container/tree/master/tomcat/examples/springmvc5) |
@ -22,167 +22,167 @@ For demonstration and testing purposes, here are some example artifacts you can
| [b2i-jar-java11.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java11.jar) | [ java-maven-example](https://github.com/kubesphere/s2i-java-container/tree/master/java/examples/maven) |
| [b2i-jar-java8.jar](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-jar-java8.jar) | [devops-java-sample](https://github.com/kubesphere/devops-java-sample) |
## Prerequisites
## 准备工作
- You have enabled the [KubeSphere DevOps System](../../installation/install-devops).
- You need to create a [Docker Hub](http://www.dockerhub.com/) account. GitLab and Harbor are also supported.
- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project).
- Set a CI dedicated node for building images. This is not mandatory but recommended for the development and production environment as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/set-ci-node/).
- 您已启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/)。
- 您需要创建一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。
- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),请务必邀请该帐户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project)。
- 设置一个 CI 专用节点用于构建镜像。该操作并非必需,但建议在开发和生产环境中进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/set-ci-node/)。
## Create a Service Using Binary-to-Image (B2I)
## 使用 Binary-to-Image (B2I) 创建服务
The steps below show how to upload an artifact, build an image and release it to Kubernetes by creating a Service in a B2I workflow.
下图中的步骤展示了如何在 B2I 工作流中通过创建服务来上传制品、构建镜像并将其发布至 Kubernetes。
![service-build](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service-build.png)
![服务构建](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service-build.png)
### Step 1: Create a Docker Hub Secret
### 步骤 1创建 Docker Hub 密钥
You must create a Docker Hub Secret so that the Docker image created through B2I can be pushed to Docker Hub. Log in to KubeSphere as `project-regular`, go to your project and create a Secret for Docker Hub. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
您必须创建 Docker Hub 密钥,以便将通过 B2I 创建的 Docker 镜像推送至 Docker Hub。以 `project-regular` 身份登录 KubeSphere转到您的项目并创建一个 Docker Hub 密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。
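If you prefer the command line, an equivalent Docker Hub Secret can be created with kubectl (a sketch; the Secret name and the project namespace are placeholders):

```bash
kubectl -n <your-project> create secret docker-registry dockerhub-secret \
  --docker-server=https://index.docker.io/v1/ \
  --docker-username=<DOCKERHUB_USERNAME> \
  --docker-password=<DOCKERHUB_PASSWORD>
```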
### Step 2: Create a Service
### 步骤 2创建服务
1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**.
1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。
![create-service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/create-service.jpg)
![创建服务](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/create-service.PNG)
2. Scroll down to **Build a New Service through the Artifact** and select **war**. This tutorial uses the [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) project as a sample and uploads a war artifact to KubeSphere. Set a name, such as `b2i-war-java8`, and click **Next**.
2. 下拉至**通过制品构建新的服务**,选择 **war**。本教程使用 [spring-mvc-showcase](https://github.com/spring-projects/spring-mvc-showcase) 项目作为示例并上传 war 制品至 KubeSphere。设置一个名称例如 `b2i-war-java8`,点击**下一步**。
3. On the **Build Settings** page, provide the following information accordingly and click **Next**.
3. 在**构建设置**页面,请提供以下相应信息,并点击**下一步**。
![build-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-settings.jpg)
![构建设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-settings.PNG)
**Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type).
**服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。
**Upload Artifact**: Upload the war artifact ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war)).
**上传制品**:上传 war 制品 ([b2i-war-java8](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-war-java8.war))
**Build Environment**: Select **kubesphere/tomcat85-java8-centos7:v2.1.0**.
**构建环境**:选择 **kubesphere/tomcat85-java8-centos7:v2.1.0**.
**imageName**: Enter `<DOCKERHUB_USERNAME>/<IMAGE NAME>` or `<HARBOR-PROJECT_NAME>/<IMAGE NAME>` as the image name.
**镜像名称**:输入 `<DOCKERHUB_USERNAME>/<IMAGE NAME>``<HARBOR-PROJECT_NAME>/<IMAGE NAME>` 作为镜像名称。
**tag**: The image tag. Enter `latest`.
**tag**:镜像标签,请输入 `latest`
**Target image repository**: Select the Docker Hub Secret as the image is pushed to Docker Hub.
**Target image repository**:镜像会推送至 Docker Hub故请选择 Docker Hub 密钥。
4. On the **Container Settings** page, scroll down to **Service Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-port`), and input `8080` for both **Container Port** and **Service Port**. Click **Next** to continue.
4. 在**容器设置**页面,下拉至**服务设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-port`**容器端口**和**服务端口**都输入 `8080`。点击**下一步**继续。
![container-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/container-settings.jpg)
![容器设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/container-settings.PNG)
{{< notice note >}}
For more information about how to set other parameters on the **Container Settings** page, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/).
有关如何在**容器设置**页面设置其他参数的更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。
{{</ notice >}}
5. On the **Mount Volumes** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue.
5. 在**挂载存储**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。点击**下一步**继续。
6. On the **Advanced Settings** page, check **Internet Access** and select **NodePort** as the access method. Click **Create** to finish the whole process.
6. 在**高级设置**页面,选中**外网访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。
![advanced-settings](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/advanced-settings.jpg)
![高级设置](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/advanced-settings.PNG)
7. Click **Image Builder** from the navigation bar and you can see that the example image is being built.![building](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building.jpg)
7. 点击左侧导航栏的**构建镜像**,您可以看到正在构建示例镜像。![构建中](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building.PNG)
### Step 3: Check results
### 步骤 3查看结果
1. Wait for a while and you can see the status of the image has reached **Successful**.
1. 稍等片刻,您可以看到镜像状态变为**成功**。
![successful](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/successful.jpg)
![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/successful.PNG)
2. Click this image to go to its detail page. Under **Job Records**, click the arrow icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally.
2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的箭头图标查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`
![inspect-logs](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-logs.jpg)
![查看日志](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-logs.PNG)
3. Go back to the previous page, and you can see the corresponding Job, Deployment and Service of the image have all been created successfully.
3. 回到上一层页面,您可以看到该镜像相应的任务、部署和服务都已成功创建。
#### Service
#### 服务
![service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/service.jpg)
![service](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/service.PNG)
#### Deployment
#### 部署
![deployment](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/deployment.jpg)
![deployment](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/deployment.PNG)
#### Job
#### 任务
![job](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job.jpg)
![job](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job.PNG)
4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag.
4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。
![docker-image](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image.jpg)
![Docker 镜像](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image.PNG)
### Step 4: Access the B2I Service
### 步骤 4访问 B2I 服务
1. On the **Services** page, click the B2I Service to go to its detail page, where you can see the port number has been exposed.
1. 在**服务**页面,请点击 B2I 服务前往其详情页面,您可以查看暴露的端口号。
![exposed-port](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/exposed-port.jpg)
![端口暴露](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/exposed-port.PNG)
2. Access the Service at `http://{$Node IP}:{$NodePort}/{$Binary-Package-Name}/`.
2. 通过 `http://{$Node IP}:{$NodePort}/{$Binary-Package-Name}/` 访问服务。
![access-service](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/access-service.jpg)
![访问服务](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/access-service.PNG)
{{< notice note >}}
You may need to open the port in your security groups and configure port forwarding rules depending on your deployment environment.
取决于您的部署环境,您可能需要在安全组中放行端口并配置端口转发规则。
{{</ notice >}}
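Looking up the NodePort from the command line gives you the values for the URL pattern above (a sketch; `b2i-war-java8` is this tutorial's Service name and the context path follows the uploaded war's name):

```bash
kubectl -n <your-project> get svc b2i-war-java8
# The PORT(S) column shows 8080:<NodePort>/TCP; combine the NodePort with any node's IP:
curl http://<node-ip>:<node-port>/b2i-war-java8/
```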
## Use the Image Builder to build an image
## 使用 Image Builder 构建镜像
The example above implements the entire workflow of B2I by creating a Service. Alternatively, you can use the Image Builder directly to build an image based on an artifact, although this method will not publish the image to Kubernetes.
前述示例通过创建服务来实现整个 B2I 工作流。此外,您也可以直接使用 Image Builder 基于制品构建镜像,但这个方式不会将镜像发布至 Kubernetes。
![build-binary](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/build-binary.png)
![build-binary](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/build-binary.png)
{{< notice note >}}
Make sure you have created a Secret for Docker Hub. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
请确保您已经创建了 Docker Hub 密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。
{{</ notice >}}
### Step 1: Upload an artifact
### 步骤 1上传制品
1. Log in to KubeSphere as `project-regular` and go to your project.
1. `project-regular` 身份登录 KubeSphere转到您的项目。
2. Select **Image Builder** from the navigation bar and click **Create**.
2. 在左侧导航栏中选择**构建镜像**,然后点击**创建**。
![image-builder](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-builder.jpg)
![image-builder](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-builder.PNG)
3. In the dialog that appears, select **binary** and click **Next**.
3. 在弹出对话框中,选择 **binary** 并点击**下一步**。
![upload-artifact](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/upload-artifact.jpg)
![upload-artifact](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/upload-artifact.PNG)
4. On the **Build Settings** page, provide the following information accordingly and click **Create**.
4. 在**构建设置**页面,请提供以下相应信息,然后点击**创建**。
![buidling-settings-2](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/buidling-settings-2.jpg)
![buidling-settings-2](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-settings-2.PNG)
**Upload Artifact**: Download [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) and upload it to KubeSphere.
**上传制品**:下载 [b2i-binary](https://github.com/kubesphere/tutorial/raw/master/tutorial%204%20-%20s2i-b2i/b2i-binary) 并上传至 KubeSphere。
**Build Environment**: Select **kubesphere/s2i-binary:v2.1.0**.
**构建环境**:选择 **kubesphere/s2i-binary:v2.1.0**
**imageName**: Customize an image name.
**镜像名称**:自定义镜像名称。
**tag**: The image tag. Enter `latest`.
**tag**:镜像标签,请输入 `latest`
**Target image repository**: Select the Docker Hub Secret as the image is pushed to Docker Hub.
**Target image repository**:镜像会推送至 Docker Hub故请选择 Docker Hub 密钥。
5. On the **Image Builder** page, you can see that the image is being built.
5. 在**构建镜像**页面,您可以看到正在构建镜像。
![building-status](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/building-status.jpg)
![构建状态](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/building-status.PNG)
### Step 2: Check results
### 步骤 2检查结果
1. Wait for a while and you can see the status of the image has reached **Successful**.
1. 稍等片刻,您可以看到镜像状态变为**成功**。
![image-success](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/image-success.jpg)
![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/image-success.PNG)
2. Click this image to go to its detail page. Under **Job Records**, click the arrow icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally.
2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的箭头图标查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`
![inspect-log](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/inspect-log.jpg)
![查看日志](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/inspect-log.PNG)
3. Go back to the previous page, and you can see the corresponding Job of the image has been created successfully.
3. 回到上一层页面,您可以看到该镜像相应的任务已成功创建。
![job-created](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/job-created.jpg)
![Job 已创建](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/job-created.PNG)
4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag.
4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。
![docker-image-pushed](/images/docs/project-user-guide/image-builder/b2i-publish-artifact-to-kubernetes/docker-image-pushed.jpg)
![Docker 镜像已推送](/images/docs/zh-cn/project-user-guide/image-builder/binary-to-image/docker-image-pushed.PNG)
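To confirm the push from a terminal, you can pull the image back by name. `<dockerhub_username>` and `<image-name>` are placeholders for the values you entered in the build settings:

```bash
# Pull the freshly pushed image from Docker Hub to verify the tag exists.
docker pull <dockerhub_username>/<image-name>:latest
```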


@ -1,143 +1,143 @@
---
title: "Source to Image: Publish an App without a Dockerfile"
title: "Source to Image:无需 Dockerfile 发布应用"
keywords: 'KubeSphere, Kubernetes, Docker, S2I, Source-to-Image'
description: 'How to publish your application using Source-to-Image.'
linkTitle: "Source to Image: Publish an App without a Dockerfile"
description: '如何使用 Source-to-Image 发布应用程序。'
linkTitle: "Source to Image:无需 Dockerfile 发布应用"
weight: 10610
---
Source-to-Image (S2I) is a toolkit and workflow for building reproducible container images from source code. S2I produces ready-to-run images by injecting source code into a container image and letting the container prepare that source code for execution. KubeSphere integrates S2I to automatically build images and publish them to Kubernetes without any Dockerfile.
Source-to-Image (S2I) 是一个工具箱和工作流用于从源代码构建可再现容器镜像。S2I 通过将源代码注入容器镜像自动将编译后的代码打包成镜像。KubeSphere 集成 S2I 来自动构建镜像,并且无需任何 Dockerfile 即可发布到 Kubernetes。
This tutorial demonstrates how to use S2I to import source code of a Java sample project into KubeSphere by creating a Service. Based on the source code, the KubeSphere Image Builder will create a Docker image, push it to a target repository and publish it to Kubernetes.
本教程演示如何通过创建服务 (Service) 使用 S2I 将 Java 示例项目的源代码导入 KubeSphere。KubeSphere Image Builder 将基于源代码创建 Docker 镜像,将其推送至目标仓库,并发布至 Kubernetes。
![build-process](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-process.png)
![构建流程](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-process.png)
## Prerequisites
## 准备工作
- You need to enable the [KubeSphere DevOps System](../../../pluggable-components/devops/) as S2I is integrated into it.
- You need to create a [GitHub](https://github.com/) account and a [Docker Hub](http://www.dockerhub.com/) account. GitLab and Harbor are also supported. This tutorial uses a GitHub repository to provide the source code for building and pushes an image to Docker Hub.
- You need to create a workspace, a project and an account (`project-regular`). The account must be invited to the project with the role of `operator`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project).
- Set a dedicated CI node for building images. This is not mandatory but recommended for development and production environments, as it caches dependencies and reduces build time. For more information, see [Set a CI Node for Dependency Caching](../../../devops-user-guide/how-to-use/set-ci-node/).
- 您需要启用 [KubeSphere DevOps 系统](../../../pluggable-components/devops/),该系统已集成 S2I。
- 您需要创建一个 [GitHub](https://github.com/) 帐户和一个 [Docker Hub](http://www.dockerhub.com/) 帐户,也支持 GitLab 和 Harbor。本教程使用 GitHub 仓库提供源代码,用于构建镜像并推送至 Docker Hub。
- 您需要创建一个企业空间、一个项目和一个帐户 (`project-regular`),请务必邀请该帐户至项目中并赋予 `operator` 角色。有关更多信息,请参见[创建企业空间、项目、帐户和角色](../../../quick-start/create-workspace-and-project)。
- 设置一个 CI 专用节点用于构建镜像。该操作不是必需,但建议开发和生产环境进行设置,专用节点会缓存依赖项并缩短构建时间。有关更多信息,请参见[为缓存依赖项设置 CI 节点](../../../devops-user-guide/how-to-use/set-ci-node/)。
## Use Source-to-Image (S2I)
## 使用 Source-to-Image (S2I)
### Step 1: Fork the example repository
### 步骤 1Fork 示例仓库
Log in to GitHub and fork the repository [devops-java-sample](https://github.com/kubesphere/devops-java-sample) to your personal GitHub account.
登录 GitHub 并 Fork GitHub 仓库 [devops-java-sample](https://github.com/kubesphere/devops-java-sample) 至您的 GitHub 个人帐户。
![fork-repository](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/fork-repository.jpg)
![Fork 仓库](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/fork-repository.PNG)
### Step 2: Create Secrets
### 步骤 2创建密钥 (Secret)
Log in to KubeSphere as `project-regular`. Go to your project and create a Secret for Docker Hub and GitHub respectively. For more information, see [Create the Most Common Secrets](../../../project-user-guide/configuration/secrets/#create-the-most-common-secrets).
以 `project-regular` 身份登录 KubeSphere 控制台,转到您的项目,分别为 Docker Hub 和 GitHub 创建密钥。有关更多信息,请参见[创建常用密钥](../../../project-user-guide/configuration/secrets/#创建常用密钥)。
{{< notice note >}}
You do not need to create the GitHub Secret if your forked repository is open to the public.
如果您 Fork 的是公开仓库,则不需要创建 GitHub 密钥。
{{</ notice >}}
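Once the Secrets have been created in the console, a quick sanity check from the command line looks like the sketch below. The namespace and Secret names are assumptions; list only the Secrets you actually created, under whatever names you chose.

```bash
# Confirm the Secret(s) exist in the project namespace (assumed names).
kubectl -n demo-project get secrets dockerhub-secret github-secret
```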
### Step 3: Create a Service
### 步骤 3创建服务
1. In the same project, navigate to **Services** under **Application Workloads** and click **Create**.
1. 在该项目中,转到**应用负载**下的**服务**,点击**创建**。
![create-service](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-service.jpg)
![创建服务](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-service.PNG)
2. Choose **Java** under **Build a New Service from Source Code Repository**, name it `s2i-demo` and click **Next**.
2. 选中**通过代码构建新的服务**下的 **Java**,将其命名为 `s2i-demo` 并点击**下一步**。
![select-lang-type](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/select-lang-type.jpg)
![选择语言类型](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/select-lang-type.PNG)
{{< notice note >}}
KubeSphere has integrated common S2I templates such as Java, Node.js and Python. If you want to use other languages or customize your S2I templates, see Customize S2I Templates.
KubeSphere 已集成常用的 S2I 模板,例如 Java、Node.js 和 Python。如果您想使用其他语言或自定义 S2I 模板,请参见自定义 S2I 模板。
{{</ notice >}}
3. On the **Build Settings** page, provide the following information and click **Next**.
3. 在**构建设置**页面,请提供以下相应信息,并点击**下一步**。
![build-settings](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-settings.jpg)
![构建设置](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-settings.PNG)
**Service Type**: Select **Stateless Service** for this example. For more information about different Services, see [Service Type](../../../project-user-guide/application-workloads/services/#service-type).
**服务类型**:本示例选择**无状态服务**。有关不同服务的更多信息,请参见[服务类型](../../../project-user-guide/application-workloads/services/#服务类型)。
**Build Environment**: Select **kubesphere/java-8-centos7:v2.1.0**.
**构建环境**:选择 **kubesphere/java-8-centos7:v2.1.0**
**Code URL**: The source code repository address (currently, only Git is supported). You can specify the code branch and the relative path within the source code repository. The URL supports HTTP and HTTPS. Paste the forked repository URL (your own repository address) into this field.
**代码地址**:源代码仓库地址(目前支持 Git。您可以指定代码分支和在源代码终端的相对路径。URL 支持 HTTP 和 HTTPS。在该字段粘贴已 Fork 仓库的 URL您自己仓库的地址
![copy-repo-code](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/copy-repo-code.jpg)
![复制仓库代码](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/copy-repo-code.PNG)
**Branch**: The branch used for image building. Enter `master` for this tutorial. You can enter `dependency` for a cache test. (A quick `git ls-remote` check of the URL and branch follows this list.)
**分支**:分支用于构建镜像。本教程中在此输入 `master`。您可以输入 `dependency` 进行缓存测试。
**Secret**: You do not need to provide any Secret for a public repository. Select the GitHub Secret if you want to use a private repository.
**密钥**:您不需要为公共仓库提供密钥。如果您想使用私有仓库,请选择 GitHub 密钥。
**imageName**: Customize an image name. As this tutorial will push an image to Docker Hub, enter `dockerhub_username/s2i-sample`, where `dockerhub_username` is your Docker ID. Make sure it has permission to push and pull images.
**镜像名称**:自定义镜像名称。本教程会向 Docker Hub 推送镜像,故请输入 `dockerhub_username/s2i-sample`。`dockerhub_username` 是您的 Docker ID请确保该 ID 有权限推送和拉取镜像。
**tag**: The image tag. Enter `latest`.
**tag**:镜像标签,请输入 `latest`
**Target image repository**: Select the Docker Hub Secret, as the image will be pushed to Docker Hub.
**Target image repository**:镜像会推送至 Docker Hub故请选择 Docker Hub 密钥。
**Advanced Settings**: You can define the code relative path. Use the default `/` for this field.
**高级设置**:您可以定义代码相对路径。该字段请使用默认的 `/`
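As mentioned under **Branch**, you can quickly confirm that the Code URL and branch are reachable before clicking **Next**. `<github_username>` is a placeholder for your own account:

```bash
# Verify that the forked repository and the master branch are reachable over HTTPS.
git ls-remote https://github.com/<github_username>/devops-java-sample.git master
```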
4. On the **Container Settings** page, scroll down to **Service Settings** to set the access policy for the container. Select **HTTP** for **Protocol**, customize the name (for example, `http-1`), and input `8080` for both **Container Port** and **Service Port**.
4. 在**容器设置**页面,下拉至**服务设置**,为容器设置访问策略。**协议**选择 **HTTP**,自定义名称(例如 `http-1`**容器端口**和**服务端口**都输入 `8080`
![service-settings](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-settings.jpg)
![服务设置](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-settings.PNG)
5. Scroll down to **Health Checker** and select it. Set a readiness probe by filling out the following parameters. Click **√** when you finish setting the probe and then click **Next** to continue.
5. 下拉至**健康检查器**并选中,填写以下参数设置就绪探针。探针设置完成后点击 **√**,然后点击**下一步**继续。
![health-checker](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/health-checker.jpg)
![健康检查器](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/health-checker.PNG)
**HTTP Request**: Select **HTTP** as the protocol, enter `/` as the path (root path in this tutorial), and input `8080` as the port exposed.
**HTTP 请求**:选择 **HTTP** 作为协议,输入 `/` 作为路径(本教程中的根路径),输入 `8080` 作为暴露端口。
**Initial Delays**: The number of seconds after the container has started before the readiness probe is initiated. Enter `30` for this field.
**初始延迟**:容器启动后,就绪探针启动之前等待的秒数。本字段输入 `30`
**Timeouts**: The number of seconds after which the probe times out. Enter `10` for this field.
**超时时间**:探针超时的秒数。本字段输入 `10`
For other fields, use the default value directly. For more information about how to configure probes and set other parameters on the **Container Settings** page, see [Container Image Settings](../../../project-user-guide/application-workloads/container-image-settings/).
其他字段请直接使用默认值。有关如何在**容器设置**页面配置探针和设置其他参数的更多信息,请参见[容器镜像设置](../../../project-user-guide/application-workloads/container-image-settings/)。
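After the Service is created in step 7, the readiness probe configured above lands in the generated Deployment's pod template. A small verification sketch, assuming the project namespace is `demo-project` and that KubeSphere derives the Deployment name from the Service name:

```bash
# Find the generated Deployment, then check its readinessProbe block.
kubectl -n demo-project get deployments
kubectl -n demo-project get deployment <s2i-demo-deployment> -o yaml | grep -A 8 readinessProbe
```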
6. On the **Mount Volumes** page, you can add a volume for the container. For more information, see [Volumes](../../../project-user-guide/storage/volumes/). Click **Next** to continue.
6. 在**挂载存储**页面,您可以为容器添加存储卷。有关更多信息,请参见[存储卷](../../../project-user-guide/storage/volumes/)。点击**下一步**继续。
7. On the **Advanced Settings** page, check **Internet Access** and select **NodePort** as the access method. Click **Create** to finish the whole process.
7. 在**高级设置**页面,选中**外网访问**并选择 **NodePort** 作为访问方式。点击**创建**完成整个操作过程。
![create-finish](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/create-finish.jpg)
![创建完成](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/create-finish.PNG)
8. Click **Image Builder** from the navigation bar and you can see that the example image is being built.
8. 点击左侧导航栏的**构建镜像**,您可以看到正在构建示例镜像。
![building](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/building.jpg)
![构建中](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/building.PNG)
### Step 4: Check results
### 步骤 4查看结果
1. Wait for a while and you will see that the status of the image changes to **Successful**.
1. 稍等片刻,您可以看到镜像状态变为**成功**。
![success-result](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/success-result.jpg)
![构建成功](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/successful-result.PNG)
2. Click this image to go to its detail page. Under **Job Records**, click the arrow icon on the right of a record to see building logs. You can see `Build completed successfully` at the end of the log if everything runs normally.
2. 点击该镜像前往其详情页面。在**任务记录**下,点击记录右侧的箭头图标查看构建日志。如果一切运行正常,您可以在日志末尾看到 `Build completed successfully`
![build-log](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/build-log.jpg)
![构建日志](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/build-log.PNG)
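The same build log can usually be read from the terminal, since every S2I build runs as a Kubernetes Job. A sketch, assuming the project namespace is `demo-project`; the Job name is derived from your image name, so list the Jobs first:

```bash
# Locate the builder Job, then stream the last lines of its pod log.
kubectl -n demo-project get jobs
kubectl -n demo-project logs job/<builder-job-name> --tail=50
```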
3. Go back to the previous page, and you can see the corresponding Job, Deployment and Service of the image have been all created successfully.
3. 回到上一层页面,您可以看到该镜像相应的任务、部署和服务都已成功创建。
#### Service
#### 服务
![service](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service.jpg)
![service](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service.PNG)
#### Deployment
#### 部署
![deployment](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/deployment.jpg)
![deployment](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/deployment.PNG)
#### Job
#### 任务
![job](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/job.jpg)
![job](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/job.PNG)
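The three resources shown above can also be listed in one command, assuming the project namespace is `demo-project`:

```bash
# Confirm the Service, Deployment, and Job created for the S2I image.
kubectl -n demo-project get services,deployments,jobs
```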
4. In your Docker Hub repository, you can see that KubeSphere has pushed the image to the repository with the expected tag.
4. 在您的 Docker Hub 仓库,您可以看到 KubeSphere 已经向仓库推送了带有预期标签的镜像。
![docker-image](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/docker-image.jpg)
![Docker 镜像](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/docker-image.PNG)
### Step 5: Access the S2I Service
### 步骤 5访问 S2I 服务
1. On the **Services** page, click the S2I Service to go to its detail page.
1. 在**服务**页面,请点击 S2I 服务前往其详情页面。
![service-detail](/images/docs/project-user-guide/image-builder/s2i-publish-app-without-dockerfile/service-detail.jpg)
![Service 详情](/images/docs/zh-cn/project-user-guide/image-builder/source-to-image/service-detail.PNG)
2. To access the Service, you can either use the endpoint with the `curl` command or visit `Node IP:Port Number`. For example:
2. 要访问该服务,您可以执行 `curl` 命令使用 Endpoint 或者访问 `Node IP:Port Number`。例如:
```bash
$ curl 10.10.131.44:8080
```
@ -146,6 +146,6 @@ You do not need to create the GitHub Secret if your forked repository is open to
{{< notice note >}}
If you want to access the Service outside the cluster, you may need to open the port in your security groups and configure port forwarding rules depending on your deployment environment.
如果您想从集群外访问该服务,可能需要根据您的部署环境在安全组中放行端口并配置端口转发规则。
{{</ notice >}}
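If you are unsure which NodePort was allocated, the sketch below looks it up and calls the Service from outside the cluster. The namespace, Service name, and node IP are placeholders:

```bash
# Look up the NodePort assigned to the s2i-demo Service, then call it through any node IP.
kubectl -n demo-project get service s2i-demo
curl <node_ip>:<node_port>
```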


@ -140,7 +140,7 @@ KubeSphere 的多租户系统分三个层级,即**群集**、**企业空间**
![查看项目](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/查看项目.jpg)
4. 在项目的**概览**页面,默认情况下未设置项目配额。您可以点击**设置**并根据需要指定资源请求和限制例如CPU 和内存的限制分别设为 1 Core 和 1000 Gi
4. 在项目的**概览**页面,默认情况下未设置项目配额。您可以点击**设置**并根据需要指定[资源请求和限制](../../workspace-administration/project-quotas/)例如CPU 和内存的限制分别设为 1 Core 和 1000 Gi
![项目概览](/images/docs/zh-cn/quickstart/create-workspaces-projects-accounts/项目概览.jpg)


@ -0,0 +1,10 @@
---
linkTitle: 通过KubeSphere S2I构建容器镜像
weight: 1
_build:
render: false
profit: 了解源码,编译,然后打包成镜像的整个过程
time: 2020-10-13 20:00-20:40
---


@ -0,0 +1,9 @@
---
title: 通过KubeSphere S2I构建容器镜像
keywords: Kubesphere, Kubesphere learn
description: Kubesphere
pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image_lab.pdf
---


@ -0,0 +1,7 @@
---
title: 通过KubeSphere S2I构建容器镜像
keywords: Kubesphere, Kubesphere learn
description: Kubesphere
pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image.pdf
---


@ -0,0 +1,8 @@
---
title: 通过KubeSphere S2I构建容器镜像
keywords: Kubesphere, Kubesphere learn
description: Kubesphere
pdfUrl:
---


@ -0,0 +1,9 @@
---
title: 通过KubeSphere S2I构建容器镜像
keywords: Kubesphere, Kubesphere learn
description: Kubesphere
video:
videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp-container%20foundation/lesson-6/KubeSphere_S2I_build_image.mp4
---


@ -0,0 +1,10 @@
---
linkTitle: 应用调度
weight: 4
_build:
render: false
profit: 了解 KubeSphere 上应用调度方法
time: 2020-10-13 20:00-20:40
---


@ -0,0 +1,7 @@
---
title: KubeSphere | 应用调度
pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications-lab.pdf
---


@ -0,0 +1,5 @@
---
title: KubeSphere | 应用调度
pdfUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications-ppt.pdf
---


@ -0,0 +1,8 @@
---
title: KubeSphere | 应用调度
video:
snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png
videoUrl: https://kubesphere-community.pek3b.qingstor.com/qkcp/lesson-27/KSCE-2020-S0001-27-Schedule-applications.mp4
---

content/zh/live/_index.md

@ -0,0 +1,68 @@
---
title: live - KubeSphere | Enterprise container platform, built on Kubernetes
description: KubeSphere is an open source container platform based on Kubernetes for enterprise app development and deployment, and supports installing anywhere from on-premises data centers to any cloud to the edge.
keywords: KubeSphere,DevOps,Istio,Service Mesh,Jenkins,
css: "scss/live.scss"
section1:
title: KubeSphere 云原生直播间
image: /images/live/background.jpg
section2:
image: /images/live/friend.jpeg
url: ./test
notice:
title: KubeSphere 在直播电商行业的多集群应用实践
timeIcon: /images/live/clock.svg
time: 2020/10/22 10/25
baseIcon: /images/live/base.svg
base: 线上
tag: 预告
url: ./test
over:
title: KubeSphere 在直播电商行业的多集群应用实践
url: ./test
tag: 结束
section3:
videos:
- title: 为什么选择 KubeSphere
link: //player.bilibili.com/player.html?aid=69124503&bvid=BV1vJ411T7th&cid=119801064&page=1
snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png
type: iframe
createTime: 2019.12.14
group: Meetup
- title: 为什么选择 KubeSphere
link: https://kubesphere-docs.pek3b.qingstor.com/website/meetup/meetup-final-1226.mp4
snapshot: https://pek3b.qingstor.com/kubesphere-docs/png/20200206170305.png
type: video
createTime: 2019.12.14
group: Meetup
section4:
overImg: /images/live/over.svg
noticeImg: /images/live/notice.svg
list:
- title: KubeSphere 在直播电商行业的多集群应用实践1
date: 10/27
time: 13:30 - 14:10
lastTime: 2020-10-27T14:10:00Z
url: ./test
- title: KubeSphere 在直播电商行业的多集群应用实践2
date: 11/27
time: 13:30 - 14:10
lastTime: 2020-12-27T14:10:00Z
url: ./test
section5:
title: 分享你的主题
content: 想在社区分享你的落地经验?说出你和 KubeSphere 的开源故事,即刻加入 KubeSphere 开源社区直播计划,提交你的分享主题,将有定制礼品相送!
btn: 提交分享主题
url:
image: /images/live/30.svg
---
