diff --git a/assets/scss/common.scss b/assets/scss/common.scss
index 7cb2d3e97..50f8dab47 100644
--- a/assets/scss/common.scss
+++ b/assets/scss/common.scss
@@ -388,7 +388,7 @@ section {
z-index: 2;
width: 20px;
height: 20px;
- background-image: url('/images/docs/copy-code.svg');
+ background-image: url('/images/docs/copy.png');
background-repeat: no-repeat;
background-size: cover;
cursor: pointer;
diff --git a/config/upstream/config.toml b/config/upstream/config.toml
index 2dfcc4b06..2dc7d522f 100644
--- a/config/upstream/config.toml
+++ b/config/upstream/config.toml
@@ -1 +1,6 @@
baseURL = "https://kubesphere.io"
+
+[params]
+
+addGoogleAnalytics = true
+addGoogleTag = true
\ No newline at end of file
diff --git a/content/en/_index.md b/content/en/_index.md
index 5b92c9970..f3cf430b1 100644
--- a/content/en/_index.md
+++ b/content/en/_index.md
@@ -93,7 +93,7 @@ section4:
content: Provide unified authentication with fine-grained roles and three-tier authorization system, and support AD/LDAP authentication
features:
- - name: Application Store
+ - name: App Store
icon: /images/home/store.svg
content: Provide an application store for Helm-based applications, and offer application lifecycle management
link: "/docs/pluggable-components/app-store/"
diff --git a/content/en/api/crd.md b/content/en/api/crd.md
new file mode 100644
index 000000000..4cbf33361
--- /dev/null
+++ b/content/en/api/crd.md
@@ -0,0 +1,8 @@
+---
+title: KubeSphere API Documentation
+description: KubeSphere API Documentation
+keywords: KubeSphere, KubeSphere Documents, Kubernetes
+
+swaggerUrl: json/crd.json
+---
+
diff --git a/content/en/api/kubesphere.md b/content/en/api/kubesphere.md
new file mode 100644
index 000000000..d5ef99133
--- /dev/null
+++ b/content/en/api/kubesphere.md
@@ -0,0 +1,7 @@
+---
+title: KubeSphere API Documentation
+description: KubeSphere API Documentation
+keywords: KubeSphere, KubeSphere Documents, Kubernetes
+
+swaggerUrl: json/kubesphere.json
+---
\ No newline at end of file
diff --git a/content/en/blogs/TiDB-on-KubeSphere-using-qke.md b/content/en/blogs/TiDB-on-KubeSphere-using-qke.md
index 30093e0a0..939fdd9f6 100644
--- a/content/en/blogs/TiDB-on-KubeSphere-using-qke.md
+++ b/content/en/blogs/TiDB-on-KubeSphere-using-qke.md
@@ -8,13 +8,15 @@ author: 'Willqy, Feynman, Sherlock'
snapshot: 'https://ap3.qingstor.com/kubesphere-website/docs/tidb-architecture.png'
---
-In a world where Kubernetes has become the de facto standard to build application services that span multiple containers, running a cloud-native distributed database represents an important part of the experience of using Kubernetes. In this connection, [TiDB](https://github.com/pingcap/tidb), as an open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads, has come to my awareness. It is MySQL compatible and features horizontal scalability, strong consistency, and high availability. It strives to provide users with a one-stop database solution that covers OLTP (Online Transactional Processing), OLAP (Online Analytical Processing), and HTAP services. TiDB is suitable for various use cases that require high availability and strong consistency with large-scale data.
+
+
+In a world where Kubernetes has become the de facto standard to build application services that span multiple containers, running a cloud-native distributed database represents an important part of the experience of using Kubernetes. In this connection, [TiDB](https://github.com/pingcap/tidb), an open-source NewSQL database that supports Hybrid Transactional and Analytical Processing (HTAP) workloads, has come to my awareness. It is MySQL compatible and features horizontal scalability, strong consistency, and high availability. It strives to provide users with a one-stop database solution that covers OLTP (Online Transactional Processing), OLAP (Online Analytical Processing), and HTAP services. TiDB is suitable for various use cases that require high availability and strong consistency with large-scale data.

Among others, [TiDB Operator](https://github.com/pingcap/tidb-operator) is an automatic operation system for TiDB clusters in Kubernetes. It provides a full management life-cycle for TiDB including deployment, upgrades, scaling, backup, fail-over, and configuration changes. With TiDB Operator, TiDB can run seamlessly in Kubernetes clusters deployed on public or private cloud.
-In addition to TiDB, I am also a KubeSphere user. [KubeSphere](https://kubesphere.io/) is a distributed operating system managing cloud-native applications with [Kubernetes](https://kubernetes.io/) as its kernel, providing a plug-and-play architecture for the seamless integration of third-party applications to boost its ecosystem. [KubeSphere can be run anywhere](https://kubesphere.io/docs/introduction/what-is-kubesphere/#run-kubesphere-everywhere) as it is highly pluggable without any hacking into Kubernetes.
+In addition to TiDB, I am also a KubeSphere user. [KubeSphere](https://kubesphere.io/) is an open-source distributed operating system managing cloud-native applications with [Kubernetes](https://kubernetes.io/) as its kernel, providing a plug-and-play architecture for the seamless integration of third-party applications to boost its ecosystem. [KubeSphere can be run anywhere](https://kubesphere.io/docs/introduction/what-is-kubesphere/#run-kubesphere-everywhere) as it is highly pluggable without any hacking into Kubernetes.

@@ -28,15 +30,19 @@ Therefore, I select QingCloud Kubernetes Engine (QKE) to prepare the environment
1. Log in the [web console of QingCloud](https://console.qingcloud.com/). Simply select KubeSphere (QKE) from the menu and create a Kubernetes cluster with KubeSphere installed. The platform allows you to install different components of KubeSphere. Here, we need to enable [OpenPitrix](https://github.com/openpitrix/openpitrix), which powers the app management feature in KubeSphere.
-
+{{< notice note >}}
+KubeSphere can be installed on any infrastructure. I just use QingCloud Platform as an example. See [KubeSphere Documentation](https://kubesphere.io/docs/) for more details.
+{{</ notice >}}
-
+
+
+
2. The cluster will be up and running in around 10 minutes. In this example, I select 3 working nodes to make sure I have enough resources for the deployment later. You can also customize configurations based on your needs. When the cluster is ready, log in the web console of KubeSphere with the default account and password (`admin/P@88w0rd`). Here is the cluster **Overview** page:

-3. Use the built-in tool Kubectl from the Toolkit in the bottom right corner to execute the following command to install TiDB Operator CRD:
+3. Use the built-in **Web Kubectl** from the Toolkit in the bottom right corner to execute the following command to install TiDB Operator CRD:
```bash
kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/manifests/crd.yaml
@@ -56,7 +62,7 @@ kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/

-## Deploying tidb-operator
+## Deploying TiDB-operator
1. Like I mentioned above, we need to create a project (i.e. namespace) first to run the TiBD cluster.
@@ -94,7 +100,7 @@ kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.6/

-## Deploying tidb-cluster
+## Deploying TiDB-cluster
The process of deploying tidb-cluster is basically the same as that of tidb-operator shown above.
@@ -140,7 +146,7 @@ Now that we have our apps ready, we may need to focus more on observability. Kub

-2. tidb, tikv and pd are all stateful applications which can be found in **StatefulSets**. Note that tikv and tidb will be created automatically and it may take a while before displaying in the list.
+2. TiDB, TiKV and pd are all stateful applications which can be found in **StatefulSets**. Note that TiKV and TiDB will be created automatically and it may take a while before displaying in the list.

@@ -152,7 +158,7 @@ Now that we have our apps ready, we may need to focus more on observability. Kub

-5. Relevant Pods are also listed. As you can see, tidb-cluster contains three pd Pods, two tidb Pods and 3 tikv Pods.
+5. Relevant Pods are also listed. As you can see, tidb-cluster contains three pd Pods, two TiDB Pods and 3 TiKV Pods.

@@ -160,7 +166,7 @@ Now that we have our apps ready, we may need to focus more on observability. Kub

-7. Volume usage is also monitored. Here is an example of tikv:
+7. Volume usage is also monitored. Here is an example of TiKV:

@@ -208,7 +214,7 @@ mysql> show databases;
mysql>
```
-4. Besides, tidb integrates Prometheus and Grafana to monitor performance of the database cluster. As we can see above, Grafana is being exposed through `NodePort`. After you configure necessary port forwarding rules and open its port in security groups on QingCloud Platform, you can access the Grafana UI to view metrics.
+4. Besides, TiDB integrates Prometheus and Grafana to monitor performance of the database cluster. As we can see above, Grafana is being exposed through `NodePort`. After you configure necessary port forwarding rules and open its port in security groups on QingCloud Platform, you can access the Grafana UI to view metrics.

@@ -218,10 +224,12 @@ I hope you guys all have successfully deploy TiDB. Both TiDB and KubeSphere are
## References
-https://docs.pingcap.com/tidb-in-kubernetes/stable/get-started
+**KubeSphere GitHub**: https://github.com/kubesphere/kubesphere
-https://docs.pingcap.com/tidb-in-kubernetes/stable/tidb-operator-overview
+**TiDB GitHub**: https://github.com/pingcap/tidb
-https://kubesphere.io/docs/introduction/what-is-kubesphere/
+**TiDB Operator Documentation**: https://docs.pingcap.com/tidb-in-kubernetes/stable/tidb-operator-overview
-https://kubesphere.io/docs/
\ No newline at end of file
+**KubeSphere Introduction**: https://kubesphere.io/docs/introduction/what-is-kubesphere/
+
+**KubeSphere Documentation**: https://kubesphere.io/docs/
\ No newline at end of file
diff --git a/content/en/blogs/multi-cluster-deployment.md b/content/en/blogs/multi-cluster-deployment.md
new file mode 100644
index 000000000..096ad243b
--- /dev/null
+++ b/content/en/blogs/multi-cluster-deployment.md
@@ -0,0 +1,139 @@
+---
+title: 'Kubernetes Multi-cluster Deployment: Federation and KubeSphere'
+keywords: Kubernetes, KubeSphere, Multi-cluster, Container
+description: KubeSphere v3.0 supports the management of multiple clusters, isolated management of resources, and federated deployments.
+tag: 'KubeSphere, Multi-cluster'
+createTime: '2020-07-20'
+author: 'Jeff, Feynman, Sherlock'
+snapshot: 'https://ap3.qingstor.com/kubesphere-website/docs/kubesphere-architecture.png'
+---
+
+## Scenarios for Multi-cluster Deployment
+
+As the container technology and Kubernetes see a surge in popularity among their users, it is not uncommon for enterprises to run multiple clusters for their business. In general, here are the main scenarios where multiple clusters can be adopted.
+
+### High Availability
+
+You can deploy workloads on multiple clusters by using a global VIP or DNS to send requests to corresponding backend clusters. When a cluster malfunctions or fails to handle requests, the VIP or DNS records can be transferred to a healthy cluster.
+
+
+
+### Low Latency
+
+When clusters are deployed in various regions, user requests can be forwarded to the nearest cluster, greatly reducing network latency. For example, we have three Kubernetes clusters deployed in New York, Houston and Los Angeles respectively. For users in California, their requests can be forwarded to Los Angeles. This will reduce the network latency due to geographical distance, providing the most consistent user experience possible for users in different areas.
+
+### Failure Isolation
+
+Generally, it is much easier for multiple small clusters to isolate failures than a large cluster. In case of outages, network failures, insufficient resources or other possible resulting issues, the failure can be isolated within a certain cluster without spreading to others.
+
+### Business Isolation
+
+Although Kubernetes provides namespaces as a solution to app isolation, this method only represents the isolation in logic. This is because different namespaces are connected through the network, which means the issue of resource preemption still exists. To achieve further isolation, you need to create additional network isolation policies or set resource quotas. Using multiple clusters can achieve complete physical isolation that is more secure and reliable than the isolation through namespaces. For example, this is extremely effective when different departments within an enterprise use multiple clusters for the deployment of development, testing or production environments.
+
+
+
+### Avoid Vendor Lock-in
+
+Kubernetes has become the de facto standard in container orchestration. Against this backdrop, many enterprises avoid putting all eggs in one basket as they deploy clusters by using services of different cloud providers. That means they can transfer and scale their business anytime between clusters. However, it is not that easy for them to transfer their business in terms of costs, as different cloud providers feature varied Kubernetes services, including storage and network interface.
+
+## Multi-cluster Deployment
+
+The application of multi-cluster deployment offers solutions to a variety of problems as we can see from the scenarios above. Nevertheless, it brings more complexity for operation and maintenance. For a single cluster, app deployment and upgrade are quite straightforward as you can directly update yaml of the cluster. For multiple clusters, you can update them one by one, but how can you guarantee the application load status is the same across different clusters? How to implement service discovery among different clusters? How to achieve load balancing across clusters? The answer given by the community is Federation.
+
+### Federation v1
+
+
+
+There are two versions of Federation with the original v1 already deprecated. In v1, the general architecture is very similar to that of Kubernetes. As shown above, Federation API Server is used for the management of different clusters. It receives the request to create the deployment of multiple clusters as Federation Controller Manager deploys workloads on each cluster.
+
+
+
+In terms of API, federated resources are scheduled through annotations, ensuring great compatibility with the original Kubernetes API. As such, the original code can be reused and existing deployment files of users can be easily transferred without any major change. However, this also prevents users from taking further advantage of Federation for API evolution. At the same time, a corresponding controller is needed for each federated resource so that they can be scheduled to different clusters. Originally, Federation only supported a limited number of resource types.
+
+### Federation v2
+
+
+
+The community developed Federation v2 (KubeFed) on the basis of v1. KubeFed has defined its own API standards through CRDs while deprecating the annotation method used before. The architecture has changed significantly as well, discarding Federated API Server and etcd that need to be deployed independently. The control plane of KubeFed adopts the popular implementation of CRD + Controller, which can be directly installed on existing Kubernetes clusters without any additional deployment.
+
+KubeFed mainly defines four resource types:
+
+- **Cluster Configuration** defines the register information needed for the control plane to add member clusters, including cluster name, APIServer address and the credential to create deployments.
+
+- **Type Configuration** defines the resource type that KubeFed should handle. Each Type Configuration is a CRD object that contains three configuration items:
+
+ - **Template.** Templates define the representation of a common resource to be handled. If the object does not have a corresponding definition on the cluster where it will be deployed, the deployment will fail. In the following example of FederatedDeployment, the template contains all the information needed to create the deployment.
+ - **Placement**. Placements define the name of the cluster that a resource object will appear in, with two methods available (`clusters` and `clusterSelector`).
+ - **Override**. Overrides define per-cluster, field-level variation that apply to the template, allowing you to customize configurations. In the example below, the number of replicas defined in `template` is 1, while `overrides` shows the replica number of cluster gondor will be 4 when it is deployed instead of 1 in `template`. A subset of the syntax of [Jsonpatch](http://jsonpatch.com/) is achieved in `overrides`, which means, theoretically, all the content in `template` can be overridden.
+
+ 
+
+- **Schedule** defines how apps are deployed across clusters, mainly related to ReplicaSets and Deployments. The maximum and minimum number of replicas of the load on a cluster can be defined through Schedule, which is similar to the annotation method in v1.
+- **MultiClusterDNS** makes it possible for service discovery across clusters. Service discovery across multiple clusters is much more complicated than in a single cluster. ServiceDNSRecord, IngressDNSRecord and DNSEndpoint objects are used in KubeFed to implement service discovery across multiple clusters (DNS needed as well).
+
+In general, KubeFed has provided solutions to many problems in v1. With CRDs, federated resources can be scaled to a large extent. Basically, all Kubernetes resources can be deployed across multiple clusters, including the CRD resources defined by users themselves.
+
+However, KubeFed also has some issues to be resolved:
+
+- **Single point of failure**. The control plane of KubeFed is achieved through CRD + Controller. High availability can be implemented for the controller itself, but the whole control plane will malfunction if Kubernetes it runs on fails. This was also [discussed in the community](https://github.com/kubernetes-sigs/kubefed/issues/636) before. Currently, KubeFed uses the push/reconcile method. When federated resources are created, the controller of the control plane will send the resource object to clusters accordingly. After that, the control plane is not responsible for how the member cluster handles resources. Therefore, existing application workloads will not be affected when KubeFed control plane fails.
+- **Maturity**. The KubeFed community is not as active as the Kubernetes community. Its iteration cycle is too slow and many features are still in the beta stage.
+- **Abstraction**. KubeFed defines resources to be managed through Type Configurations. Different Type Configurations only vary in their templates. The advantage is that the logic can be unified so that they can be quickly achieved. In KubeFed, the corresponding Controllers of Type Configuration resources are all created through [templates](https://github.com/kubernetes-sigs/kubefed/blob/master/pkg/controller/federatedtypeconfig/controller.go). That said, the shortcoming is quite obvious as customized features are not supported for special Types. For instance, for a FederatedDeployment object, KubeFed only needs to create a deployment object accordingly based on template and override, which will be deployed on the cluster specified in placement. As for whether the corresponding Pod is created based on the deployment and how the Pod runs, you can only check the information in the related cluster. The community has realized this issue and is working on it. A [proposal](https://github.com/kubernetes-sigs/kubefed/pull/1237) has already been raised.
+
+## Multi-cluster Feature in KubeSphere
+
+Resource federation is what the community has proposed to solve the issue of deployments across multiple clusters. For many enterprise users, the deployment of multiple clusters is not necessary. What is more important is that they need to be able to manage the resources across multiple clusters at the same time and in the same place.
+
+[KubeSphere](https://github.com/kubesphere) supports the management of multiple clusters, isolated management of resources, and federated deployments. In addition, it also features multi-dimensional queries (monitoring, logging, events and auditing) of resources such as clusters and apps, as well as alerts and notifications through various channels. Apps can be deployed on multiple clusters with CI/CD pipelines.
+
+
+
+KubeSphere 3.0 supports unified management of user access for the multi-cluster feature based on KubeFed, RBAC and Open Policy Agent. With the multi-tenant architecture, it is very convenient for business departments, development teams and Ops teams to manage resources in isolation in a unified console according to their needs.
+
+
+
+### Architecture
+
+
+
+The overall multi-cluster architecture of [KubeSphere](https://kubesphere.io/) is shown above. The cluster where the control plane is located is called Host cluster. The cluster managed by the Host cluster is called Member cluster, which is essentially a Kubernetes cluster with KubeSphere installed. The Host cluster needs to be able to access the kube-apiserver of Member clusters. Besides, there is no requirement for the network connectivity between Member clusters. The Host cluster is independent of the member clusters managed by it, which do not know the existence of the Host cluster. The advantage of the logic is that when the Host cluster malfunctions, Member clusters will not be affected and deployed workloads can continue to run as well.
+
+In addition, the Host cluster also serves as an entry for API requests. It will forward all resource requests for member clusters to them. In this way, not only can requests be aggregated, but also authentication and authorization can be implemented in a unified fashion.
+
+### Authorization and Authentication
+
+It can be seen from the architecture that the Host cluster is responsible for the synchronization of identity and access information of clusters, which is achieved by federated resources of KubeFed. When FederatedUser, FederatedRole, or FederatedRoleBinding is created on the Host cluster, KubeFed will push User, Role, or Rolebinding to Member clusters. Any access change will only be applied to the Host cluster, which will then be synchronized to Member clusters. This is to ensure the integrity of each Member cluster. In this regard, the identity and access data stored in Member clusters enable them to implement authentication and authorization independently without any reliance on the Host cluster. In the multi-cluster architecture of KubeSphere, the Host cluster acts as a resource coordinator instead of a dictator, since it delegates power to Member clusters as much as possible.
+
+### Cluster Connectivity
+
+The multi-cluster feature of KubeSphere only entails the access of the Host cluster to the Kubernetes APIServer of Member clusters. There is no requirement for network connectivity at the cluster level. KubeSphere provides two methods for the connection of Host and Member clusters:
+
+- **Direct connection**. If the kube-apiserver address of Member clusters is accessible on any node of the Host cluster, you can adopt this method. Member clusters only need to provide the cluster kubeconfig. This method applies to most public cloud Kubernetes services or the scenario where the Host cluster and Member clusters are in the same network.
+
+- **Agent connection**. In case Member clusters are in a private network with the kube-apiserver address unable to be exposed, KubeSphere provides [Tower](https://github.com/kubesphere/tower) for agent connection. Specifically, the Host cluster will run a proxy service. When a new cluster joins, the Host cluster will generate all credential information. Besides, the agent running on Member clusters will connect to the proxy service running on the Host cluster. A reverse proxy will be created after the handshake succeeds. As the kube-apiserver address of Member clusters will change in agent connection, the Host cluster needs to create a new kubeconfig for Member clusters. This is very convenient as the underlying details can be hidden. In either direct connection or agent connection, the control plane is provided with a kubeconfig that can be used directly.
+
+ 
+
+### API Forwarding
+
+In the multi-cluster architecture of KubeSphere, the Host cluster serves as a cluster entry. All API requests are directly sent to the Host cluster first, which will decide where these requests go next. To provide the best compatibility possible with the original API in the multi-cluster environment, the API request whose path begins with `/apis/clusters/{cluster}` will be forwarded to the cluster `{cluster}`, with `/clusters/{cluster}` removed. The advantage is that there is no difference between the request the cluster receives this way with other requests, with no additional configuration or operation needed.
+
+
+
+For example:
+
+
+
+The request above will be forwarded to a cluster named rohan and be handled as:
+
+
+
+## Summary
+
+The topic of multi-cluster deployment is far more complicated than we think. The fact that the Federation solution provided by the community has not been officially released after two versions is a typical example. As we often put it, there is no Silver Bullet in software engineering. It is impossible for multi-cluster tools such as KubeFed and KubeSphere to solve all the issues. We need to find the solution that best suits us based on the specific business scenario. It is believed that these tools will become more mature over time, which can be applied in more scenarios.
+
+## References
+
+1. KubeFed: https://github.com/kubernetes-sigs/kubefed
+2. KubeSphere Website: https://kubesphere.io/
+3. Kubernetes Federation Evolution: https://kubernetes.io/blog/2018/12/12/kubernetes-federation-evolution/
+4. KubeSphere GitHub: https://github.com/kubesphere
\ No newline at end of file
diff --git a/content/en/contribution/_index.md b/content/en/contribution/_index.md
index 349828612..42ba85c67 100644
--- a/content/en/contribution/_index.md
+++ b/content/en/contribution/_index.md
@@ -15,9 +15,9 @@ section2:
icon2: 'images/contribution/37.png'
children:
- content: 'Download KubeSphere'
- link: 'https://kubesphere.io/docs/installation/intro/'
+ link: '../../../docs/quick-start/all-in-one-on-linux/'
- content: 'Quickstart'
- link: 'https://kubesphere.io/docs/quick-start/admin-quick-start/'
+ link: '../../../docs/quick-start/create-workspace-and-project/'
- content: 'Tutorial Videos'
link: '../videos'
@@ -84,7 +84,7 @@ section3:
- name: 'Apps'
icon: '/images/contribution/apps.svg'
iconActive: '/images/contribution/apps-active.svg'
- content: 'App charts for the built-in Application Store'
+ content: 'App charts for the built-in App Store'
link: 'https://github.com/kubesphere/community/tree/master/sig-apps'
linkContent: 'Join SIG - Apps →'
children:
@@ -92,7 +92,7 @@ section3:
- icon: '/images/contribution/calicq2.jpg'
- icon: '/images/contribution/calicq3.jpg'
- - name: 'Application Store'
+ - name: 'App Store'
icon: '/images/contribution/app-store.svg'
iconActive: '/images/contribution/app-store-active.svg'
content: 'App Store, App template management'
diff --git a/content/en/docs/access-control-and-account-management/_index.md b/content/en/docs/access-control-and-account-management/_index.md
index 36e06aedc..d03f3c0cd 100644
--- a/content/en/docs/access-control-and-account-management/_index.md
+++ b/content/en/docs/access-control-and-account-management/_index.md
@@ -1,9 +1,9 @@
---
-title: "Accecc Control and Account Management"
-description: "Accecc Control and Account Management"
+title: "Access Control and Account Management"
+description: "Access Control and Account Management"
layout: "single"
-linkTitle: "Accecc Control and Account Management"
+linkTitle: "Access Control and Account Management"
weight: 4500
icon: "/images/docs/docs.svg"
diff --git a/content/en/docs/api-reference/api-changes/monitoring.md b/content/en/docs/api-reference/api-changes/monitoring.md
index 99c682381..6051fce3e 100644
--- a/content/en/docs/api-reference/api-changes/monitoring.md
+++ b/content/en/docs/api-reference/api-changes/monitoring.md
@@ -34,10 +34,6 @@ The following metrics have been deprecated and removed.
|cluster_workspace_count|
|cluster_account_count|
|cluster_devops_project_count|
-|workspace_namespace_count|
-|workspace_devops_project_count|
-|workspace_member_count|
-|workspace_role_count|
|coredns_up_sum|
|coredns_cache_hits|
|coredns_cache_misses|
@@ -53,6 +49,15 @@ The following metrics have been deprecated and removed.
|prometheus_up_sum|
|prometheus_tsdb_head_samples_appended_rate|
+New metrics are introduced in KubeSphere 3.0.0.
+
+|New Metrics|
+|---|
+|kubesphere_workspace_count|
+|kubesphere_user_count|
+|kubesphere_cluser_count|
+|kubesphere_app_template_count|
+
## Response Fields
In KubeSphere 3.0.0, the response fields `metrics_level`, `status` and `errorType` are removed.
diff --git a/content/en/docs/application-store/_index.md b/content/en/docs/application-store/_index.md
index 9c84de2af..5d0faebfe 100644
--- a/content/en/docs/application-store/_index.md
+++ b/content/en/docs/application-store/_index.md
@@ -1,23 +1,14 @@
---
-title: "Application Store"
+title: "App Store"
description: "Getting started with KubeSphere DevOps project"
layout: "single"
-linkTitle: "Application Store"
+
+linkTitle: "App Store"
weight: 4600
icon: "/images/docs/docs.svg"
---
-## Installing KubeSphere and Kubernetes on Linux
-
-In this chapter, we will demonstrate how to use KubeKey to provision a new Kubernetes and KubeSphere cluster based on different infrastructures. Kubekey can help you to quickly build a production-ready cluster architecture on a set of machines from zero to one. It also helps you to easily scale the cluster and install pluggable components on existing architecture.
-
-## Most Popular Pages
-
-Below you will find some of the most common and helpful pages from this chapter. We highly recommend you to review them at first.
-
-{{< popularPage icon="/images/docs/bitmap.jpg" title="Install KubeSphere on AWS EC2" description="Provisioning a new Kubernetes and KubeSphere cluster based on AWS" link="" >}}
-
-{{< popularPage icon="/images/docs/bitmap.jpg" title="Install KubeSphere on AWS EC2" description="Provisioning a new Kubernetes and KubeSphere cluster based on AWS" link="" >}}
+TBD
diff --git a/content/en/docs/application-store/app-lifecycle-management.md b/content/en/docs/application-store/app-lifecycle-management.md
new file mode 100644
index 000000000..2b33ccaae
--- /dev/null
+++ b/content/en/docs/application-store/app-lifecycle-management.md
@@ -0,0 +1,162 @@
+---
+title: "App Lifecycle Management"
+keywords: 'kubernetes, kubesphere, app-store'
+description: 'App Lifecycle Management'
+
+
+weight: 2240
+---
+
+KubeSphere integrates open-source project [OpenPitrix](https://github.com/openpitrix/openpitrix) to set up the App Store which provides the full lifecycle of application management. App Store supports two kinds of application deployment as follows:
+
+> - **Application template** provides a way for developers and ISVs to share applications with users in a workspace. It also supports importing third-party application repositories within workspace.
+> - **Composing application** means users can quickly compose multiple microservices into a complete application through the one-stop console.
+
+
+
+## Objective
+
+In this tutorial, we will walk you through how to use [EMQ X](https://www.emqx.io/) as a demo application to demonstrate the **global application store** and **application lifecycle management** including upload / submit / review / test / release / upgrade / delete application templates.
+
+## Prerequisites
+
+- You need to install [App Store (OpenPitrix)](../../pluggable-components/app-store).
+- You need to create a workspace and a project, see [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/).
+
+## Hands-on Lab
+
+### Step 1: Create Customized Role and Account
+
+In this step, we will create two accounts, i.e., `isv` for ISVs and `reviewer` for app technical reviewers.
+
+1.1. First of all, we need to create a role for app reviewers. Log in KubeSphere console with the account `admin`, go to **Platform → Access Control → Account Roles**, then click **Create** and name it `app-review`, choose **App Templates Management** and **App Templates View** in the authorization settings list, then click **Create**.
+
+
+
+1.2. Create an account `reviewer`, and grant the role of **app-review** to it.
+
+1.3. Similarly, create an account `isv`, and grant the role of **platform-regular** to it.
+
+
+
+1.4. Invite the accounts that we created above to an existing workspace such as `demo-workspace`, and grant them the role of `workspace-admin`.
+
+### Step 2: Upload and Submit Application
+
+2.1. Log in KubeSphere with `isv`, enter the workspace. We are going to upload the EMQ X app to this workspace. First please download [EMQ X chart v1.0.0](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/emqx-v1.0.0-rc.1.tgz) and click **Upload Template** by choosing **App Templates**.
+
+> Note we are going to upload a newer version of EMQ X to demo the upgrade feature later on.
+
+
+
+2.2. Click **Upload**, then click **Upload Helm Chart Package** to upload the chart.
+
+
+
+2.3. Click **OK**. Now download [EMQ Icon](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/emqx-logo.png) and click **Upload icon** to upload App logo. Click **OK** when you are done.
+
+
+
+2.4. At this point, you will be able to see the status displays `draft`, which means this app is under development. The uploaded app is visible to all members in the same workspace.
+
+
+
+2.5. Enter app template detailed page by clicking on EMQ X from the list. You can edit the basic information of this app by clicking **Edit Info**.
+
+
+
+2.6. You can customize the app's basic information by filling in the table as the following screenshot.
+
+
+
+2.7. Save your changes, then you can test this application by deploying to Kubernetes. Click on the **Test Deploy** button.
+
+
+
+2.8. Select cluster and project that you want to deploy into, check app config then click **Deploy**.
+
+
+
+
+
+2.9. Wait for a few minutes, then switch to the tab **Deployed Instances**. You will find EMQ X App has been deployed successfully.
+
+
+
+2.10. At this point, you can click `Submit Review` to submit this application to `reviewer`.
+
+
+
+2.11. As shown in the following graph, the app status has been changed to `Submitted`. Now app reviewer can review it.
+
+
+
+### Step 3: Review Application
+
+3.1. Log out, then use `reviewer` account to log in KubeSphere. Navigate to **Platform → App Store Management → App Review**.
+
+
+
+3.2. Click **Review** by clicking the vertical three dots at the end of app item in the list, then you start to review the app's basic information, introduction, chart file and updated logs from the pop-up windows.
+
+
+
+3.3. It is the reviewer's responsibility to judge if the app satisfies the criteria of the Global App Store or not, if yes, then click `Pass`; otherwise, `Reject` it.
+
+### Step 4: Release Application to Store
+
+4.1. Log out and switch to use `isv` to log in KubeSphere. Now `isv` can release the EMQ X application to the global application store which means all users in this platform can find and deploy this application.
+
+4.2. Enter the demo workspace and navigate to the EMQ X app from the template list. Enter the detailed page and expand the version list, then click **Release to Store**, choose **OK** in the pop-up windows.
+
+
+
+4.3. At this point, EMQ X has been released to application store.
+
+
+
+4.4. Go to **App Store** in the top menu, you will see the app in the list.
+
+
+
+4.5. At this point, we can use any role of users to access EMQ X application. Click into the application detailed page to go through its basic information. You can click **Deploy** button to deploy the application to Kubernetes.
+
+
+
+### Step 5: Create Application Category
+
+Depending on the business needs, `Reviewer` can create multiple categories for different types of applications. It is similar to a tag and can be used in the application store to filter applications, e.g. Big data, Middleware, IoT, etc.
+
+As for EMQ X application, we can create a category and name it `IOT`. First switch back to the user `Reviewer`, go to **Platform → App Store Management → App Categories**
+
+
+
+Then click **Uncategorized** and find EMQ X, change its category to `IOT` and save it.
+
+> Note usually reviewer should create necessary categories in advance according to the requirements of the store. Then ISVs categorize their applications as appropriate before submitting for review.
+
+
+
+### Step 6: Add New Version
+
+6.1. KubeSphere supports adding new versions of existing applications for users to quickly upgrade. Let's continue to use `isv` account and enter the EMQ X template page in the workspace.
+
+
+
+6.2. Download [EMQ X v4.0.2](https://github.com/kubesphere/tutorial/raw/master/tutorial%205%20-%20app-store/emqx-v4.0.2.tgz), then click on the **New Version** on the right, upload the package that you just downloaded.
+
+
+
+6.3. Click **OK** when you upload successfully.
+
+
+
+6.4. At this point, you can test the new version and submit it to `Reviewer`. This process is similar to the one for the first version.
+
+
+
+6.5. After you submit the new version, the rest of process regarding review and release are also similar to the first version that we demonstrated above.
+
+### Step 7: Upgrade
+
+After the new version has been released to the application store, all users can upgrade this application.
\ No newline at end of file
diff --git a/content/en/docs/application-store/built-in-apps/memcached-app.md b/content/en/docs/application-store/built-in-apps/memcached-app.md
new file mode 100644
index 000000000..eef406492
--- /dev/null
+++ b/content/en/docs/application-store/built-in-apps/memcached-app.md
@@ -0,0 +1,42 @@
+---
+title: "Memcached App"
+keywords: 'Kubernetes, KubeSphere, Memcached, app-store'
+description: 'How to use built-in Memcached Object Storage'
+
+
+weight: 2242
+---
+[Memcached](https://memcached.org/) is designed for large data caches. Its API is available for most popular languages. This guide will show you one-click deployment for Memcached in Kubernetes.
+
+## Prerequisites
+
+- You have enabled [KubeSphere App Store](../../pluggable-components/app-store)
+- You have completed the tutorial in [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/). Now switch to use `project-regular` account to log in and enter into `demo-project`.
+
+## Hands-on Lab
+
+### Common steps
+
+1. Choose Memcached template `From App Store`.
+
+
+
+2. Check app info and click `Deploy` button.
+
+
+
+3. Select app version and deployment location, then go to **Next → Deploy**
+
+
+
+4. Wait for a few minutes, then you will see the application memcached showing active on the application list.
+
+
+
+5. Click into Memcached application, and then enter into its workload page and get the pod IP.
+
+
+
+6. Because the Memcached service type is headless, we should connect to it inside the cluster with the pod IP obtained previously and the default port `11211`.
+
+
diff --git a/content/en/docs/application-store/built-in-apps/postgresql-app.md b/content/en/docs/application-store/built-in-apps/postgresql-app.md
new file mode 100644
index 000000000..5d0667afb
--- /dev/null
+++ b/content/en/docs/application-store/built-in-apps/postgresql-app.md
@@ -0,0 +1,50 @@
+---
+title: "PostgreSQL App"
+keywords: 'Kubernetes, KubeSphere, PostgreSQL, app-store'
+description: 'How to use built-in PostgreSQL'
+
+
+weight: 2242
+---
+[PostgreSQL](https://www.postgresql.org/) is a powerful, open source object-relational database system which is famous for reliability, feature robustness, and performance. This guide will show you one-click deployment for PostgreSQL in Kubernetes.
+
+## Prerequisites
+
+- You have enabled [KubeSphere App Store](../../pluggable-components/app-store)
+- You have completed the tutorial in [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/). Now switch to use `project-regular` account to log in and enter into `demo-project`.
+
+## Hands-on Lab
+
+### Common steps
+
+1. Choose PostgreSQL template `From App Store`.
+
+
+
+2. Check app info and click `Deploy` button.
+
+
+
+3. Select app version and deployment location, then go to **Next → Deploy**
+
+
+
+4. Wait for a few minutes, then you will see the application postgresql showing active on the application list.
+
+
+
+5. Click into PostgreSQL application, and then enter into its service page.
+
+
+
+6. In this page, make sure its deployment and Pod are running, then click **More → Edit Internet Access**, and select **NodePort** in the dropdown list, click **OK** to save it.
+
+
+
+7. Go to **App Template → Configuration Files** and get rootUsername and rootPassword from `values.yaml`.
+
+
+
+8. In this step, we can connect to the PostgreSQL database outside the cluster using host: ${Node IP}, port: ${NODEPORT}, with the rootUsername and rootPassword we got previously.
+
+
diff --git a/content/en/docs/application-store/built-in-apps/rabbitmq-app.md b/content/en/docs/application-store/built-in-apps/rabbitmq-app.md
index f327747fd..d133da39b 100644
--- a/content/en/docs/application-store/built-in-apps/rabbitmq-app.md
+++ b/content/en/docs/application-store/built-in-apps/rabbitmq-app.md
@@ -6,59 +6,78 @@ description: 'How to deploy RabbitMQ on KubeSphere through App Store'
link title: "Deploy RabbitMQ"
weight: 251
---
-[RabbitMQ](https://www.rabbitmq.com/) is the most widely deployed open source message broker. and it's lightweight and easy to deploy on premises and in the cloud. It supports multiple messaging protocols. RabbitMQ can be deployed in distributed and federated configurations to meet high-scale, high-availability requirements.
-This tutorial walks you through an example of how to deploy RabbitMQ on KubeSphere.
+[RabbitMQ](https://www.rabbitmq.com/) is the most widely deployed open-source message broker. It is lightweight and easy to deploy on premises and in the cloud. It supports multiple messaging protocols. RabbitMQ can be deployed in distributed and federated configurations to meet high-scale, high-availability requirements.
+
+This tutorial walks you through an example of how to deploy RabbitMQ from the App Store of KubeSphere.
## Prerequisites
-- Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). RabbitMQ will be deployed from the App Store.
-- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-operator` and work in the project `test-project` in the workspace `test-workspace`.
+- Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/).
+- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project/).
## Hands-on Lab
### Step 1: Deploy RabbitMQ from App Store
-Please make sure you are landing on the **Overview** page of the project `test-project`.
+1. On the **Overview** page of the project `demo-project`, click **App Store** in the top left corner.
-1. Go to **App Store**.
+ 
-
+2. Find RabbitMQ and click **Deploy** on the **App Info** page.
-2. Find **RabbitMQ** and click **Deploy**.
+ 
-
+ 
-
+3. Set a name and select an app version. Make sure RabbitMQ is deployed in `demo-project` and click **Next**.
-3. Make sure RabbitMQ is deployed in `test-project` and click **Next**.
+ 
-
+4. In **App Config**, you can use the default configuration directly or customize the configuration either by specifying fields in a form or editing the YAML file. Record the value of **Root Username** and the value of **Root Password**, which will be used later for login. Click **Deploy** to continue.
-4. Use the default configuration or change the account and password as you want. then click **Deploy**.
+ 
-
+ 
+
+ {{< notice tip >}}
+
+ To see the manifest file, toggle the **YAML** switch.
+
+ {{</ notice >}}
5. Wait until RabbitMQ is up and running.
-
+ 
### Step 2: Access RabbitMQ Dashboard
-1. Go to **Services**.and click **rabbiitmq-service-name**.
+To access RabbitMQ outside the cluster, you need to expose the app through NodePort first.
-
+1. Go to **Services** and click the service name of RabbitMQ.
-2. Click **More** and click **Edit Internet Access**.
+ 
-
+2. Click **More** and select **Edit Internet Access** from the drop-down menu.
-3. Select **NodePort** and click **Ok**. [Learn More](https://v2-1.docs.kubesphere.io/docs/project-setting/project-gateway/)
-
+ 
-4. Through {$NodeIP} : {$Nodeport} to access RabbitMQ management.
-
+3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](../../../project-administration/project-gateway/).
-5. Log in RabbitMQ management.
-
+ 
-6. If you want to learn more information about RabbitMQ please refer to https://www.rabbitmq.com/documentation.html.
+4. Under **Service Ports**, you can see ports are exposed.
+
+ 
+
+5. Access RabbitMQ **management** through `{$NodeIP}:{$Nodeport}`. Note that the username and password are those you set in **Step 1**.
+ 
+
+ 
+
+ {{< notice note >}}
+
+ You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed.
+
+ {{</ notice >}}
+
+6. For more information about RabbitMQ, refer to [the official documentation of RabbitMQ](https://www.rabbitmq.com/documentation.html).
\ No newline at end of file
diff --git a/content/en/docs/application-store/built-in-apps/tomcat-app.md b/content/en/docs/application-store/built-in-apps/tomcat-app.md
index ecce78067..74e8022fd 100644
--- a/content/en/docs/application-store/built-in-apps/tomcat-app.md
+++ b/content/en/docs/application-store/built-in-apps/tomcat-app.md
@@ -6,67 +6,83 @@ description: 'How to deploy Tomcat on KubeSphere through App Store'
link title: "Deploy Tomcat"
weight: 261
---
-[Apache Tomcat](https://tomcat.apache.org/index.html) software powers numerous large-scale, mission-critical web applications across a diverse range of industries and organizations.
-This tutorial walks you through an example of how to deploy Tomcat on KubeSphere.
+[Apache Tomcat](https://tomcat.apache.org/index.html) powers numerous large-scale, mission-critical web applications across a diverse range of industries and organizations. Tomcat provides a pure Java HTTP web server environment in which Java code can run.
+
+This tutorial walks you through an example of deploying Tomcat from the App Store of KubeSphere.
## Prerequisites
-- Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/). Tomcat will be deployed from the App Store.
-- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-operator` and work in the project `test-project` in the workspace `test-workspace`.
+- Please make sure you [enable the OpenPitrix system](https://kubesphere.io/docs/pluggable-components/app-store/).
+- You need to create a workspace, a project, and a user account for this tutorial. The account needs to be a platform regular user and to be invited as the project operator with the `operator` role. In this tutorial, you log in as `project-regular` and work in the project `demo-project` in the workspace `demo-workspace`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project/).
## Hands-on Lab
### Step 1: Deploy Tomcat from App Store
-Please make sure you are landing on the **Overview** page of the project `test-project`.
+1. On the **Overview** page of the project `demo-project`, click **App Store** in the top left corner.
-1. Go to **App Store**.
+ 
-
+2. Find Tomcat and click **Deploy** on the **App Info** page.
-2. Find **Tomcat** and click **Deploy**.
+ 
-
+ 
-
+3. Set a name and select an app version. Make sure Tomcat is deployed in `demo-project` and click **Next**.
-3. Make sure Tomcat is deployed in `test-project` and click **Next**.
+ 
-
+4. In **App Config**, you can use the default configuration or customize the configuration by editing the YAML file directly. Click **Deploy** to continue.
-4. Use the default configuration and click **Deploy**.
-
-
+ 
5. Wait until Tomcat is up and running.
-
+ 
### Step 2: Access Tomcat Terminal
-1. Go to **Services** and click **tomcat-service-name**.
+1. Go to **Services** and click the service name of Tomcat.
-
+ 
-2. Expand pods information and click **terminal**. You can now use the feature.
-
-
+2. Under **Pods**, expand the menu to see container details, and then click the **Terminal** icon.
-3. You can view the deployed projects in `/usr/local/tomcat/webapps`.
-
+ 
-### Step 3: Access the Tomcat project in the browser
+3. You can view deployed projects in `/usr/local/tomcat/webapps`.
-1. Go to **Services** and click **tomcat-service-name**.
+ 
-2. Click **More** and click **Edit Internet Access**.
-
+### Step 3: Access Tomcat Project from Browser
-3. Select **NodePort** and click **Ok**. [Learn More](https://v2-1.docs.kubesphere.io/docs/project-setting/project-gateway/)
-
+To access Tomcat projects outside the cluster, you need to expose the app through NodePort first.
-4. Through {$NodeIP} : {$Nodeport} / {$Project path} to access the tomcat project in browser.
-
-
+1. Go to **Services** and click the service name of Tomcat.
-5. If you want to learn more information about Tomcat please refer to https://tomcat.apache.org/index.html.
+ 
+
+2. Click **More** and select **Edit Internet Access** from the drop-down menu.
+
+ 
+
+3. Select **NodePort** for **Access Method** and click **OK**. For more information, see [Project Gateway](https://deploy-preview-492--kubesphere-v3.netlify.app/docs/project-administration/project-gateway/).
+
+ 
+
+4. Under **Service Ports**, you can see the port is exposed.
+
+ 
+
+5. Access the sample Tomcat project through `{$NodeIP}:{$Nodeport}` in your browser.
+
+ 
+
+ {{< notice note >}}
+
+ You may need to open the port in your security groups and configure related port forwarding rules depending on where your Kubernetes cluster is deployed.
+
+ {{</ notice >}}
+
+6. For more information about Tomcat, refer to [the official documentation of Tomcat](https://tomcat.apache.org/index.html).
\ No newline at end of file
diff --git a/content/en/docs/cluster-administration/_index.md b/content/en/docs/cluster-administration/_index.md
index f5670b30c..e85333a13 100644
--- a/content/en/docs/cluster-administration/_index.md
+++ b/content/en/docs/cluster-administration/_index.md
@@ -13,15 +13,19 @@ icon: "/images/docs/docs.svg"
In KubeSphere, you set a cluster's configuration and configure its features using the interactive web console or the built-in native command-line tool kubectl. As a cluster administrator, you are responsible for a series of tasks, including cordoning and adding labels to nodes, controlling cluster visibility, monitoring cluster status, setting cluster-wide alerting and notification rules, as well as configuring storage and log collection solutions.
-{{< notice note >}}
+{{< notice note >}}
Multi-cluster management is not covered in this chapter. For more information about this feature, see [Multi-cluster Management](../multicluster-management/).
-{{ notice >}}
+{{ notice >}}
+
+## [Persistent Volume and Storage Class](../cluster-administration/persistent-volume-and-storage-class/)
+
+Learn basic concepts of PVs, PVCs and storage classes, and how to manage storage classes and PVCs in KubeSphere.
## [Node Management](../cluster-administration/nodes/)
-Monitor node status and learn how to add node label or taints.
+Monitor node status and learn how to add node label or taints.
## [Cluster Status Monitoring](../cluster-administration/cluster-status-monitoring/)
@@ -29,7 +33,7 @@ Monitor how a cluster is functioning based on different metrics, including physi
## [Application Resources Monitoring](../cluster-administration/application-resources-monitoring/)
-Monitor application resources across the cluster, such as the number of Deployments and CPU usage of different projects.
+Monitor application resources across the cluster, such as the number of Deployments and CPU usage of different projects.
## Cluster-wide Alerting and Notification
@@ -73,3 +77,10 @@ Learn how to add Fluentd to receive logs, events or auditing logs.
Customize your email address settings to receive notifications of any alert.
+## [Customizing Platform Information](../cluster-administration/platform-settings/customize-basic-information/)
+
+Customize platform settings such as logo, title etc.
+
+## [Cluster Shutdown and Restart](../cluster-administration/shuting-down-and-restart-cluster-cracefully/)
+
+Learn how to gracefully shut down your cluster and how to restart it.
diff --git a/content/en/docs/cluster-administration/application-resources-monitoring.md b/content/en/docs/cluster-administration/application-resources-monitoring.md
index dbfe55bab..1ec81d58c 100644
--- a/content/en/docs/cluster-administration/application-resources-monitoring.md
+++ b/content/en/docs/cluster-administration/application-resources-monitoring.md
@@ -9,7 +9,7 @@ weight: 400
In addition to monitoring data at the physical resource level, cluster administrators also need to keep a close track of application resources across the platform, such as the number of projects and DevOps projects, as well as the number of workloads and services of a specific type. Application resource monitoring provides a summary of resource usage and application-level trends of the platform.
-## Prerequisites
+## Prerequisites
You need an account granted a role including the authorization of **Clusters Management**. For example, you can log in the console as `admin` directly or create a new role with the authorization and assign it to an account.
@@ -17,25 +17,26 @@ You need an account granted a role including the authorization of **Clusters Man
1. Click **Platform** in the top left corner and select **Clusters Management**.
-
+ 
2. If you have enabled the [multi-cluster feature](../../multicluster-management) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly.
-
+ 
3. Choose **Application Resources** under **Monitoring & Alerting** to see the overview of application resource monitoring, including the summary of the usage of all resources in the cluster, as shown in the following figure.
-
+ 
4. Among them, **Cluster Resources Usage** and **Application Resources Usage** retain the monitoring data of the last 7 days and support custom time range queries.
-
+ 
5. Click a specific resource to view detailed usage and trends of it during a certain time period, such as **CPU** under **Cluster Resources Usage**. The detail page allows you to view specific monitoring data by project. The highly-interactive dashboard enables users to customize the time range, displaying the exact resource usage at a given time point.
-
+ 
## Usage Ranking
-**Usage Ranking** supports the sorting of project resource usage, so that platform administrators can understand the resource usage of each project in the current cluster, including **CPU Usage**, **Memory Usage**, **Pod Count**, as well as **Outbound Traffic** and **Inbound Traffic**. You can sort projects in ascending or descending order by one of the indicators in the drop-down list.
-
\ No newline at end of file
+**Usage Ranking** supports the sorting of project resource usage, so that platform administrators can understand the resource usage of each project in the current cluster, including **CPU Usage**, **Memory Usage**, **Pod Count**, as well as **Outbound Traffic** and **Inbound Traffic**. You can sort projects in ascending or descending order by one of the indicators in the drop-down list. This feature is very useful for quickly locating your application (Pod) that is consuming heavy CPU or memory.
+
+
diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md
index 08cbc6fb2..0656bede9 100644
--- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md
+++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md
@@ -15,23 +15,25 @@ Before adding a log receiver, you need to enable any of the `logging`, `events`
1. To add a log receiver:
-- Login KubeSphere with an account of ***platform-admin*** role
-- Click ***Platform*** -> ***Clusters Management***
-- Select a cluster if multiple clusters exist
-- Click ***Cluster Settings*** -> ***Log Collections***
-- Log receivers can be added by clicking ***Add Log Collector***
+ - Login KubeSphere with an account of ***platform-admin*** role
+ - Click ***Platform*** -> ***Clusters Management***
+ - Select a cluster if multiple clusters exist
+ - Click ***Cluster Settings*** -> ***Log Collections***
+ - Log receivers can be added by clicking ***Add Log Collector***
-
+ 
2. Choose ***Elasticsearch*** and fill in the Elasticsearch service address and port like below:
-
+ 
3. Elasticsearch appears in the receiver list of ***Log Collections*** page and its status becomes ***Collecting***.
-
+ 
4. Verify whether Elasticsearch is receiving logs sent from Fluent Bit:
-- Click ***Log Search*** in the ***Toolbox*** in the bottom right corner.
-- You can search logs in the logging console that appears.
+ - Click ***Log Search*** in the ***Toolbox*** in the bottom right corner.
+ - You can search logs in the logging console that appears.
+
+ You can read [Log Query](../../../../toolbox/log-query/) to learn how to use the tool.
diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
index 278fbe438..b41f09274 100644
--- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
+++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
@@ -125,31 +125,30 @@ EOF
1. To add a log receiver:
-- Login KubeSphere with an account of ***platform-admin*** role
-- Click ***Platform*** -> ***Clusters Management***
-- Select a cluster if multiple clusters exist
-- Click ***Cluster Settings*** -> ***Log Collections***
-- Log receivers can be added by clicking ***Add Log Collector***
+ - Login KubeSphere with an account of ***platform-admin*** role
+ - Click ***Platform*** -> ***Clusters Management***
+ - Select a cluster if multiple clusters exist
+ - Click ***Cluster Settings*** -> ***Log Collections***
+ - Log receivers can be added by clicking ***Add Log Collector***
-
+ 
2. Choose ***Fluentd*** and fill in the Fluentd service address and port like below:
-
+ 
3. Fluentd appears in the receiver list of ***Log Collections*** UI and its status shows ***Collecting***.
-
-
+ 
4. Verify whether Fluentd is receiving logs sent from Fluent Bit:
-- Click ***Application Workloads*** in the ***Cluster Management*** UI.
-- Select ***Workloads*** and then select the `default` namespace in the ***Workload*** - ***Deployments*** tab
-- Click the ***fluentd*** item and then click the ***fluentd-xxxxxxxxx-xxxxx*** pod
-- Click the ***fluentd*** container
-- In the ***fluentd*** container page, select the ***Container Logs*** tab
+ - Click ***Application Workloads*** in the ***Cluster Management*** UI.
+ - Select ***Workloads*** and then select the `default` namespace in the ***Workload*** - ***Deployments*** tab
+ - Click the ***fluentd*** item and then click the ***fluentd-xxxxxxxxx-xxxxx*** pod
+ - Click the ***fluentd*** container
+ - In the ***fluentd*** container page, select the ***Container Logs*** tab
-You'll see logs begin to scroll up continuously.
+ You'll see logs begin to scroll up continuously.
-
\ No newline at end of file
+ 
\ No newline at end of file
diff --git a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
index d2aa50fae..4da8a863b 100644
--- a/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
+++ b/content/en/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
@@ -10,8 +10,8 @@ KubeSphere supports using Elasticsearch, Kafka and Fluentd as log receivers.
This doc will demonstrate:
- Deploy [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) and then create a Kafka cluster and a Kafka topic by creating `Kafka` and `KafkaTopic` CRDs.
-- Add Kafka log receiver to receive logs sent from Fluent Bit
-- Verify whether the Kafka cluster is receiving logs using [Kafkacat](https://github.com/edenhill/kafkacat)
+- Add Kafka log receiver to receive logs sent from Fluent Bit.
+- Verify whether the Kafka cluster is receiving logs using [Kafkacat](https://github.com/edenhill/kafkacat).
## Prerequisite
@@ -29,105 +29,104 @@ You can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-op
1. Install [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) to the `default` namespace:
-```bash
-helm repo add strimzi https://strimzi.io/charts/
-helm install --name kafka-operator -n default strimzi/strimzi-kafka-operator
-```
+ ```bash
+ helm repo add strimzi https://strimzi.io/charts/
+ helm install --name kafka-operator -n default strimzi/strimzi-kafka-operator
+ ```
2. Create a Kafka cluster and a Kafka topic in the `default` namespace:
-To deploy a Kafka cluster and create a Kafka topic, you simply need to open the ***kubectl*** console in ***KubeSphere Toolbox*** and run the following command:
+ To deploy a Kafka cluster and create a Kafka topic, you simply need to open the ***kubectl*** console in ***KubeSphere Toolbox*** and run the following command:
-{{< notice note >}}
+ {{< notice note >}}
+The following will create Kafka and Zookeeper clusters with storage type `ephemeral` which is `emptydir` for demo purposes. You should use other storage types for production. Please refer to [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml).
+ {{</ notice >}}
-The following will create Kafka and Zookeeper clusters with storage type `ephemeral` which is `emptydir` for demo purpose. You should use other storage types for production, please refer to [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml)
-
-{{</ notice >}}
-
-```yaml
-cat < ***Change Status***
-
+ 
- You can select ***Activate*** or ***Close*** to turn the log receiver on or off
-
+ 
- Log receiver's status will be changed to ***Close*** if you turn it off, otherwise the status will be ***Collecting***
-
+ 
## Modify or delete a log receiver
@@ -89,6 +89,6 @@ You can modify a log receiver or delete it:
- Click a log receiver and enter the receiver details page.
- You can edit a log receiver by clicking ***Edit*** or ***Edit Yaml***
-
+ 
- Log receiver can be deleted by clicking ***Delete Log Collector***
diff --git a/content/en/docs/cluster-administration/cluster-settings/mail-server.md b/content/en/docs/cluster-administration/cluster-settings/mail-server.md
index b076a8577..e90978145 100644
--- a/content/en/docs/cluster-administration/cluster-settings/mail-server.md
+++ b/content/en/docs/cluster-administration/cluster-settings/mail-server.md
@@ -18,9 +18,9 @@ This guide demonstrates email notification settings (customized settings support
## Hands-on Lab
1. Log in the web console with one account granted the role `platform-admin`.
-2. Click **Platform** in the top left corner and select **Clusters Management**.
+2. Click **Platform** in the top left corner and select **Clusters Management**.
-
+ 
3. Select a cluster from the list and enter it (If you do not enable the [multi-cluster feature](../../../multicluster-management/), you will directly go to the **Overview** page).
4. Select **Mail Server** under **Cluster Settings**. In the page, provide your mail server configuration and SMTP authentication information as follows:
@@ -28,6 +28,6 @@ This guide demonstrates email notification settings (customized settings support
- **Use SSL Secure Connection**: SSL can be used to encrypt mails, thereby improving the security of information transmitted by mails. Usually you have to configure the certificate for the mail server.
- SMTP authentication information: Fill in **SMTP User**, **SMTP Password**, **Sender Email Address**, etc. as below
-
+ 
-5. After you complete the above settings, click **Save**. You can send a test email to verify the success of the server configuration.
\ No newline at end of file
+5. After you complete the above settings, click **Save**. You can send a test email to verify the success of the server configuration.
diff --git a/content/en/docs/cluster-administration/cluster-status-monitoring.md b/content/en/docs/cluster-administration/cluster-status-monitoring.md
index 868caf407..e808c0ad0 100644
--- a/content/en/docs/cluster-administration/cluster-status-monitoring.md
+++ b/content/en/docs/cluster-administration/cluster-status-monitoring.md
@@ -16,6 +16,7 @@ You need an account granted a role including the authorization of **Clusters Man
## Cluster Status Monitoring
1. Click **Platform** in the top left corner and select **Clusters Management**.
+<<<<<<< HEAD

2. If you have enabled the [multi-cluster feature](../../multicluster-management) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly.
@@ -24,10 +25,23 @@ You need an account granted a role including the authorization of **Clusters Man
3. Choose **Cluster Status** under **Monitoring & Alerting** to see the overview of cluster status monitoring, including **Cluster Node Status**, **Components Status**, **Cluster Resources Usage**, **ETCD Monitoring**, and **Service Component Monitoring**, as shown in the following figure.

+=======
+
+ 
+
+2. If you have enabled the [multi-cluster feature](../../multicluster-management) with member clusters imported, you can select a specific cluster to view its application resources. If you have not enabled the feature, refer to the next step directly.
+
+ 
+
+3. Choose **Cluster Status** under **Monitoring & Alerting** to see the overview of cluster status monitoring, including **Cluster Node Status**, **Components Status**, **Cluster Resources Usage**, **ETCD Monitoring**, and **Service Component Monitoring**, as shown in the following figure.
+
+ 
+>>>>>>> 5f1e339a014d6bf1d77bcedbb4f723ea0a7e556d
### Cluster Node Status
1. **Cluster Node Status** displays the status of all nodes, separately marking the active ones. You can go to the **Cluster Nodes** page shown below to view the real-time resource usage of all nodes by clicking **Node Online Status**.
+<<<<<<< HEAD

2. In **Cluster Nodes**, click the node name to view usage details in **Running Status**, including the information of CPU, Memory, Pod, Local Storage in the current node, and its health status.
@@ -38,16 +52,29 @@ You need an account granted a role including the authorization of **Clusters Man

{{< notice tip >}}
+=======
+ 
+
+2. In **Cluster Nodes**, click the node name to view usage details in **Running Status**, including the information of CPU, Memory, Pod, Local Storage in the current node, and its health status.
+
+ 
+
+3. Click the tab **Monitoring** to view how the node is functioning during a certain period based on different metrics, including **CPU Utilization, CPU Load Average, Memory Utilization, Disk Utilization, inode Utilization, IOPS, DISK Throughput, and Network Bandwidth**, as shown in the following figure.
+
+ 
+>>>>>>> 5f1e339a014d6bf1d77bcedbb4f723ea0a7e556d
+
+ {{< notice tip >}}
You can customize the time range from the drop-down list in the top right corner to view historical data.
-
-{{</ notice >}}
+ {{</ notice >}}
### Component Status
KubeSphere monitors the health status of various service components in the cluster. When a key component malfunctions, the system may become unavailable. The monitoring mechanism of KubeSphere ensures the platform can notify tenants of any occurring issues in case of a component failure, so that they can quickly locate the problem and take corresponding action.
1. On the **Cluster Status Monitoring** page, click components (the part in the green box below) under **Components Status** to view the status of service components.
+<<<<<<< HEAD

2. You can see all the components are listed in this part. Components marked in green are those functioning normally while those marked in orange require special attention as it signals potential issues.
@@ -55,10 +82,18 @@ KubeSphere monitors the health status of various service components in the clust

{{< notice tip >}}
+=======
+ 
+
+2. You can see all the components are listed in this part. Components marked in green are those functioning normally while those marked in orange require special attention as it signals potential issues.
+
+ 
+>>>>>>> 5f1e339a014d6bf1d77bcedbb4f723ea0a7e556d
+
+ {{< notice tip >}}
Components marked in orange may turn to green after a period of time, the reasons of which may be different, such as image pulling retries or pod recreations. You can click the component to see its service details.
-
-{{</ notice >}}
+ {{</ notice >}}
### Cluster Resources Usage
@@ -151,6 +186,8 @@

+
+
## APIServer Monitoring
[API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) is the hub for the interaction of all components in a Kubernetes cluster. The following table lists the main indicators monitored for the APIServer.
diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
index da0dc604c..32743df80 100644
--- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
+++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-message.md
@@ -17,19 +17,21 @@ You have created a node-level alert policy and received alert notifications of i
### Task 1: View Alert Message
-1. Log in the console with one account granted the role `platform-admin`.
-2. Click **Platform** in the top left corner and select **Clusters Management**.
+1. Log in the console with one account granted the role `platform-admin`.
-
+2. Click **Platform** in the top left corner and select **Clusters Management**.
+
+ 
3. Select a cluster from the list and enter it (If you do not enable the [multi-cluster feature](../../../multicluster-management/), you will directly go to the **Overview** page).
+
4. Navigate to **Alerting Messages** under **Monitoring & Alerting**, and you can see alert messages in the list. In the example of [Alert Policy (Node Level)](../alerting-policy/), you set one node as the monitoring target, and its memory utilization rate is higher than the threshold of `50%`, so you can see an alert message of it.
-
+ 
5. Click the alert message to enter the detail page. In **Alerting Detail**, you can see the graph of memory utilization rate of the node over time, which has been continuously higher than the threshold of `50%` set in the alert rule, so the alert was triggered.
-
+ 
### Task 2: View Alert Policy
@@ -41,9 +43,9 @@ Switch to **Alerting Policy** to view the alert policy corresponding to this ale
1. Switch to **Recent Notification**. It can be seen that 3 notifications have been received, because the notification rule was set with a repetition period of `Alert once every 5 minutes` and retransmission of `Resend up to 3 times`.
-
+ 
-2. Log in your email to see alert notification mails sent by the KubeSphere mail server. You have received a total of 3 emails.
+2. Log in your email to see alert notification mails sent by the KubeSphere mail server. You have received a total of 3 emails.
### Task 4: Add Comment
diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md
index 34137db4d..87be272ef 100644
--- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md
+++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alerting-policy.md
@@ -20,19 +20,22 @@ KubeSphere provides alert policies for nodes and workloads. This guide demonstra
### Task 1: Create an Alert Policy
-1. Log in the console with one account granted the role `platform-admin`.
-2. Click **Platform** in the top left corner and select **Clusters Management**.
+1. Log in the console with one account granted the role `platform-admin`.
-
+2. Click **Platform** in the top left corner and select **Clusters Management**.
+
+ 
3. Select a cluster from the list and enter it (If you do not enable the [multi-cluster feature](../../../multicluster-management/), you will directly go to the **Overview** page).
+
4. Navigate to **Alerting Policies** under **Monitoring & Alerting**, and click **Create**.
-
+ 
### Task 2: Provide Basic Information
In the dialog that appears, fill in the basic information as follows. Click **Next** after you finish.
+
- **Name**: a concise and clear name as its unique identifier, such as `alert-demo`.
- **Alias**: to help you distinguish alert policies better. Chinese is supported.
- **Description**: a brief introduction to the alert policy.
@@ -41,7 +44,8 @@ In the dialog that appears, fill in the basic information as follows. Click **Ne
### Task 3: Select Monitoring Targets
-Select several nodes in the node list as the monitoring targets. Here a node is selected for the convenience of demonstration. Click **Next** when you finish.
+Select several nodes in the node list or use Node Selector to choose a group of nodes as the monitoring targets. Here a node is selected for the convenience of demonstration. Click **Next** when you finish.
+

{{< notice note >}}
@@ -54,7 +58,7 @@ You can sort nodes in the node list from the drop-down menu through the followin
1. Click **Add Rule** to begin to create an alerting rule. The rule defines parameters such as metric type, check period, consecutive times, metric threshold and alert level to provide rich configurations. The check period (the second field under **Rule**) means the time interval between 2 consecutive checks of the metric. For example, `2 minutes/period` means the metric is checked every two minutes. The consecutive times (the third field under **Rule**) means the number of consecutive times that the metric meets the threshold when checked. An alert is only triggered when the actual time is equal to or is greater than the number of consecutive times set in the alert policy.
-
+ 
2. In this example, set those parameters to `memory utilization rate`, `1 minute/period`, `2 consecutive times`, `>` and `50%`, and `Major Alert` in turn. It means KubeSphere checks the memory utilization rate every minute, and a major alert is triggered if it is larger than 50% for 2 consecutive times.
@@ -62,21 +66,23 @@ You can sort nodes in the node list from the drop-down menu through the followin
{{< notice note >}}
-- You can create node-level alert policies for the following metrics:
- - CPU: `cpu utilization rate`, `cpu load average 1 minute`, `cpu load average 5 minutes`, `cpu load average 15 minutes`
- - Memory: `memory utilization rate`, `memory available`
- - Disk: `inode utilization rate`, `disk space available`, `local disk space utilization rate`, `disk write throughput`, `disk read throughput`, `disk read iops`, `disk write iops`
- - Network: `network data transmitting rate`, `network data receiving rate`
- - Pod: `pod abnormal ratio`, `pod utilization rate`
+You can create node-level alert policies for the following metrics:
+
+- CPU: `cpu utilization rate`, `cpu load average 1 minute`, `cpu load average 5 minutes`, `cpu load average 15 minutes`
+- Memory: `memory utilization rate`, `memory available`
+- Disk: `inode utilization rate`, `disk space available`, `local disk space utilization rate`, `disk write throughput`, `disk read throughput`, `disk read iops`, `disk write iops`
+- Network: `network data transmitting rate`, `network data receiving rate`
+- Pod: `pod abnormal ratio`, `pod utilization rate`
{{</ notice >}}
### Task 5: Set Notification Rule
1. **Effective Notification Time Range** is used to set sending time of notification emails, such as `09:00 ~ 19:00`. **Notification Channel** currently only supports **Email**. You can add email addresses of members to be notified to **Notification List**.
-1. **Customize Repetition Rules** defines sending period and retransmission times of notification emails. If alerts have not been resolved, the notification will be sent repeatedly after a certain period of time. Different repetition rules can also be set for different levels of alerts. Since the alert level set in the previous step is `Major Alert`, select `Alert once every 5 miniutes` (sending period) in the second field for **Major Alert** and `Resend up to 3 times` in the third field (retransmission times). Refer to the following image to set notification rules:
-
+2. **Customize Repetition Rules** defines sending period and retransmission times of notification emails. If alerts have not been resolved, the notification will be sent repeatedly after a certain period of time. Different repetition rules can also be set for different levels of alerts. Since the alert level set in the previous step is `Major Alert`, select `Alert once every 5 minutes` (sending period) in the second field for **Major Alert** and `Resend up to 3 times` in the third field (retransmission times). Refer to the following image to set notification rules:
+
+ 
3. Click **Create**, and you can see that the alert policy is successfully created.
diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md
index e29adee1c..7aee3210d 100644
--- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md
+++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/alertmanager.md
@@ -21,7 +21,7 @@ Starting from v3.0, KubeSphere adds popular alert rules in the open source commu
## Use Alertmanager to manage K8s events alerts
-Alertmanager can be used to manage alerts sent from sources other than Prometheus. In KubeSphere v3.0 and above, user can use it to manage alerts triggered by K8s events. For more details, please refer to [kube-events](https://github.com/kubesphere/kube-events)
+Alertmanager can be used to manage alerts sent from sources other than Prometheus. In KubeSphere v3.0 and above, users can use it to manage alerts triggered by K8s events. For more details, please refer to [kube-events](https://github.com/kubesphere/kube-events).
## Use Alertmanager to manage KubeSphere auditing alerts
diff --git a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/notification-manager.md b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/notification-manager.md
index 30646e25b..8e3f2ad93 100644
--- a/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/notification-manager.md
+++ b/content/en/docs/cluster-administration/cluster-wide-alerting-and-notification/notification-manager.md
@@ -7,7 +7,7 @@ linkTitle: "Notification Manager"
weight: 2020
---
-[Notification Manager](https://github.com/kubesphere/notification-manager) manages notifications in KubeSphere. It receives alerts or notifications from different senders and then send notifications to different users.
+[Notification Manager](https://github.com/kubesphere/notification-manager) manages notifications in KubeSphere. It receives alerts or notifications from different senders and then sends notifications to different users.
Supported senders include:
diff --git a/content/en/docs/cluster-administration/nodes.md b/content/en/docs/cluster-administration/nodes.md
index fa9bf6855..c48f021b2 100644
--- a/content/en/docs/cluster-administration/nodes.md
+++ b/content/en/docs/cluster-administration/nodes.md
@@ -21,30 +21,28 @@ Cluster nodes are only accessible to cluster administrators. Some node metrics a
1. Click **Platform** in the top left corner and select **Clusters Management**.
-
+ 
2. If you have enabled the [multi-cluster feature](../../multicluster-management) with member clusters imported, you can select a specific cluster to view its nodes. If you have not enabled the feature, refer to the next step directly.
-
+ 
3. Choose **Cluster Nodes** under **Nodes**, where you can see detailed information of node status.
-
+ 
-- **Name**: The node name and subnet IP address.
-- **Status**: The current status of a node, indicating whether a node is available or not.
-- **Role**: The role of a node, indicating whether a node is a worker or master.
-- **CPU**: The real-time CPU usage of a node.
-- **Memory**: The real-time memory usage of a node.
-- **Pods**: The real-time usage of Pods on a node.
-- **Allocated CPU**: This metric is calculated based on the total CPU requests of Pods on a node. It represents the amount of CPU reserved for workloads on this node, even if workloads are using fewer CPU resources. This figure is vital to the Kubernetes scheduler (kube-scheduler), which favors nodes with lower allocated CPU resources when scheduling a Pod in most cases. For more details, refer to [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
-- **Allocated Memory**: This metric is calculated based on the total memory requests of Pods on a node. It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources.
-
-{{< notice note >}}
+ - **Name**: The node name and subnet IP address.
+ - **Status**: The current status of a node, indicating whether a node is available or not.
+ - **Role**: The role of a node, indicating whether a node is a worker or master.
+ - **CPU**: The real-time CPU usage of a node.
+ - **Memory**: The real-time memory usage of a node.
+ - **Pods**: The real-time usage of Pods on a node.
+ - **Allocated CPU**: This metric is calculated based on the total CPU requests of Pods on a node. It represents the amount of CPU reserved for workloads on this node, even if workloads are using fewer CPU resources. This figure is vital to the Kubernetes scheduler (kube-scheduler), which favors nodes with lower allocated CPU resources when scheduling a Pod in most cases. For more details, refer to [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+ - **Allocated Memory**: This metric is calculated based on the total memory requests of Pods on a node. It represents the amount of memory reserved for workloads on this node, even if workloads are using fewer memory resources.
+ {{< notice note >}}
**CPU** and **Allocated CPU** are different most times, so are **Memory** and **Allocated Memory**, which is normal. As a cluster administrator, you need to focus on both metrics instead of just one. It's always a good practice to set resource requests and limits for each node to match their real usage. Over-allocating resources can lead to low cluster utilization, while under-allocating may result in high pressure on a cluster, leaving the cluster unhealthy.
-
-{{</ notice >}}
+ {{</ notice >}}
## Node Management
@@ -55,22 +53,20 @@ Click a node from the list and you can go to its detail page.
- **Cordon/Uncordon**: Marking a node as unschedulable is very useful during a node reboot or other maintenance. The Kubernetes scheduler will not schedule new Pods to this node if it's been marked unschedulable. Besides, this does not affect existing workloads already on the node. In KubeSphere, you mark a node as unschedulable by clicking **Cordon** on the node detail page. The node will be schedulable if you click the button (**Uncordon**) again.
- **Labels**: Node labels can be very useful when you want to assign Pods to specific nodes. Label a node first (e.g. label GPU nodes with `node-role.kubernetes.io/gpu-node`), and then add the label in **Advanced Settings** [when you create a workload](../../project-user-guide/application-workloads/deployments/#step-5-configure-advanced-settings) so that you can allow Pods to run on GPU nodes explicitly. To add node labels, click **More** and select **Edit Labels**.
-
+ 
-
+ 
-
+ 
- **Taints**: Taints allow a node to repel a set of pods. You add or remove node taints on the node detail page. To add or delete taints, click **More** and select **Taint Management** from the drop-down menu.
-
-
-{{< notice note >}}
+ 
+ {{< notice note >}}
Be careful when you add taints as they may cause unexpected behavior, leading to services unavailable. For more information, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
-
-{{</ notice >}}
+ {{</ notice >}}
## Add and Remove Nodes
-Currently, you cannot add or remove nodes directly from the KubeSphere console, but you can do it by using [KubeKey](https://github.com/kubesphere/kubekey). For more information, see [Add New Nodes](../../installing-on-linux/cluster-operation/add-new-nodes/) and [Remove Nodes](../../installing-on-linux/cluster-operation/remove-nodes/).
\ No newline at end of file
+Currently, you cannot add or remove nodes directly from the KubeSphere console, but you can do it by using [KubeKey](https://github.com/kubesphere/kubekey). For more information, see [Add New Nodes](../../installing-on-linux/cluster-operation/add-new-nodes/) and [Remove Nodes](../../installing-on-linux/cluster-operation/remove-nodes/).
diff --git a/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md b/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md
new file mode 100644
index 000000000..49b8c0f04
--- /dev/null
+++ b/content/en/docs/cluster-administration/persistent-volume-and-storage-class.md
@@ -0,0 +1,180 @@
+---
+title: "Persistent Volume and Storage Class"
+keywords: "storage, volume, pv, pvc, storage class, csi, Ceph RBD, Glusterfs, QingCloud, "
+description: "Persistent Volume and Storage Class Management"
+
+linkTitle: "Persistent Volume and Storage Class"
+weight: 100
+---
+
+This tutorial describes the basic concepts of PVs, PVCs and storage classes and demonstrates how a cluster administrator can manage storage classes and persistent volumes in KubeSphere.
+
+## Introduction
+
+A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. PVs can be provisioned either [statically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static) or [dynamically](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#dynamic).
+
+A PersistentVolumeClaim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources.
+
+KubeSphere supports [dynamic volume provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) based on storage classes to create PVs.
+
+A [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes) provides a way for administrators to describe the classes of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. Each StorageClass has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. For which value to use, please read [the official Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner) or check with your storage administrator.
+
+The table below summarizes common volume plugins for various provisioners (storage systems).
+
+| Type | Description |
+| -------------------- | ------------------------------------------------------------ |
+| In-tree | Built-in and run as part of Kubernetes, such as [RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) and [Glusterfs](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs). For more plugins of this kind, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). |
+| External-provisioner | Deployed independently from Kubernetes, but works like an in-tree plugin, such as [nfs-client](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client). For more plugins of this kind, see [External Storage](https://github.com/kubernetes-retired/external-storage). |
+| CSI | Container Storage Interface, a standard for exposing storage resources to workloads on COs (e.g. Kubernetes), such as [QingCloud-csi](https://github.com/yunify/qingcloud-csi) and [Ceph-CSI](https://github.com/ceph/ceph-csi). For more plugins of this kind, see [Drivers](https://kubernetes-csi.github.io/docs/drivers.html). |
+
+## Prerequisites
+
+You need an account granted a role including the authorization of **Clusters Management**. For example, you can log in the console as `admin` directly or create a new role with the authorization and assign it to an account.
+
+## Manage Storage Class
+
+1. Click **Platform** in the top left corner and select **Clusters Management**.
+ 
+
+2. If you have enabled the [multi-cluster feature](../../multicluster-management) with member clusters imported, you can select a specific cluster. If you have not enabled the feature, refer to the next step directly.
+
+3. On the **Cluster Management** page, navigate to **Storage Classes** under **Storage**, where you can create, update and delete a storage class.
+ 
+
+4. To create a storage class, click **Create** and enter the basic information in the pop-up window. When you finish, click **Next**.
+ 
+
+5. In KubeSphere, you can create storage classes for `QingCloud-CSI`, `Glusterfs` and `Ceph RBD` directly. Alternatively, you can also create customized storage classes for other storage systems based on your needs. Select a type and click **Next**.
+ 
+
+ 
+
+### Common Settings
+
+Some settings are commonly used and shared among storage classes. You can find them as dashboard properties on the console, which are also indicated by fields or annotations in the StorageClass manifest. You can see the manifest file in YAML format by enabling **Edit Mode** in the top right corner.
+Here are property descriptions of some commonly used fields in KubeSphere.
+
+| Property | Description |
+| :---- | :---- |
+| Allow Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. When it is set to `true`, PVs can be configured to be expandable. For more information, see [Allow Volume Expansion](https://kubernetes.io/docs/concepts/storage/storage-classes/#allow-volume-expansion). |
+| Reclaiming Policy | Specified by `reclaimPolicy` in the manifest. It can be set to `Delete` or `Retain` (default). For more information, see [Reclaim Policy](https://kubernetes.io/docs/concepts/storage/storage-classes/#reclaim-policy). |
+| Storage System | Specified by `provisioner` in the manifest. It determines what volume plugin is used for provisioning PVs. For more information, see [Provisioner](https://kubernetes.io/docs/concepts/storage/storage-classes/#provisioner). |
+| Supported Access Mode | Specified by `metadata.annotations[storageclass.kubesphere.io/supported-access-modes]` in the manifest. It tells KubeSphere which [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) is supported. |
+
+For other settings, you need to provide different information for different storage plugins, which, in the manifest, are always indicated under the field `parameters`. They will be described in detail in the sections below. You can also refer to [Parameters](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters) in the official documentation of Kubernetes.
+
+### QingCloud CSI
+
+QingCloud CSI is a CSI plugin on Kubernetes for the volume of QingCloud. Storage classes of QingCloud CSI can be created on the KubeSphere console.
+
+#### Prerequisites
+
+- QingCloud CSI can be used on both public cloud and private cloud of QingCloud. Therefore, make sure KubeSphere has been installed on either of them so that you can use cloud volumes.
+- QingCloud CSI Plugin has been installed on your KubeSphere cluster. See [QingCloud-CSI Installation](https://github.com/yunify/qingcloud-csi#installation) for more information.
+
+#### Settings
+
+
+
+| Property | Description |
+| :---- | :---- |
+| type | On the QingCloud platform, 0 represents high performance volumes. 2 represents high capacity volumes. 3 represents super high performance volumes. 5 represents Enterprise Server SAN. 6 represents NeonSan HDD. 100 represents standard volumes. 200 represents enterprise SSD. |
+| maxSize | The volume size upper limit. |
+| stepSize | The volume size increment. |
+| minSize | The volume size lower limit. |
+| fsType | Filesystem type of the volume: ext3, ext4 (default), xfs. |
+| tags | The ID of QingCloud Tag resource, split by commas. |
+
+More storage class parameters can be seen in [QingCloud-CSI user guide](https://github.com/yunify/qingcloud-csi/blob/master/docs/user-guide.md#set-storage-class).
+
+### Glusterfs
+
+Glusterfs is an in-tree storage plugin on Kubernetes, which means you don't need to install a volume plugin additionally.
+
+#### Prerequisites
+
+The Glusterfs storage system has already been installed. See [GlusterFS Installation Documentation](https://www.gluster.org/install/) for more information.
+
+#### Settings
+
+| Property | Description |
+| :---- | :---- |
+| resturl | The Gluster REST service/Heketi service url which provision gluster volumes on demand. |
+| clusterid | The ID of the cluster which will be used by Heketi when provisioning the volume. |
+| restauthenabled | Gluster REST service authentication boolean that enables authentication to the REST server. |
+| restuser | The Glusterfs REST service/Heketi user who has access to create volumes in the Glusterfs Trusted Pool. |
+| secretNamespace, secretName | The Identification of Secret instance that contains user password to use when talking to Gluster REST service. |
+| gidMin, gidMax | The minimum and maximum value of GID range for the StorageClass. |
+| volumetype | The volume type and its parameters can be configured with this optional value. |
+
+For more information about StorageClass parameters, see [Glusterfs in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs).
+
+### Ceph RBD
+
+Ceph RBD is also an in-tree storage plugin on Kubernetes. The volume plugin is already in Kubernetes,
+but the storage server must be installed before you create the storage class of Ceph RBD.
+
+As **hyperkube** images were [deprecated since 1.17](https://github.com/kubernetes/kubernetes/pull/85094), in-tree Ceph RBD may not work without **hyperkube**.
+Nevertheless, you can use [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) as a substitute, whose format is the same as in-tree Ceph RBD. The only different parameter is `provisioner` (i.e **Storage System** on the KubeSphere console). If you want to use rbd-provisioner, the value of `provisioner` must be `ceph.com/rbd` (Input this value in **Storage System** in the image below). If you use in-tree Ceph RBD, the value must be `kubernetes.io/rbd`.
+
+
+
+#### Prerequisites
+
+- The Ceph server has already been installed. See [Ceph Installation Documentation](https://docs.ceph.com/en/latest/install/) for more information.
+- Install the plugin if you choose to use rbd-provisioner. Community developers provide [charts for rbd provisioner](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner) that you can use to install rbd-provisioner by helm.
+
+#### Settings
+
+| Property | Description |
+| :---- | :---- |
+| monitors| The Ceph monitors, comma delimited. |
+| adminId| The Ceph client ID that is capable of creating images in the pool. |
+| adminSecretName| The Secret Name for `adminId`. |
+| adminSecretNamespace| The namespace for `adminSecretName`. |
+| pool | The Ceph RBD pool. |
+| userId | The Ceph client ID that is used to map the RBD image. |
+| userSecretName | The name of Ceph Secret for `userId` to map RBD image. |
+| userSecretNamespace | The namespace for `userSecretName`. |
+| fsType | The fsType that is supported by Kubernetes. |
+| imageFormat | The Ceph RBD image format, `1` or `2`. |
+| imageFeatures| This parameter is optional and should only be used if you set `imageFormat` to `2`. |
+
+For more information about StorageClass parameters, see [Ceph RBD in Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd).
+
+### Custom Storage Class
+
+You can create custom storage classes for your storage systems if they are not directly supported by KubeSphere. The following example shows you how to create a storage class for NFS on the KubeSphere console.
+
+#### NFS Introduction
+
+NFS (Net File System) is widely used on Kubernetes with the external-provisioner volume plugin
+[nfs-client](https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client). You can create the storage class of nfs-client by clicking **Custom** in the image below.
+
+
+
+#### Prerequisites
+
+- An available NFS server.
+- The volume plugin nfs-client has already been installed. Community developers provide [charts for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner) that you can use to install nfs-client by helm.
+
+#### Common Settings
+
+
+
+| Property | Description |
+| :---- | :---- |
+| Storage System | Specified by `provisioner` in the manifest. If you install the storage class by [charts for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner), it can be `cluster.local/nfs-client-nfs-client-provisioner`. |
+| Allow Volume Expansion | Specified by `allowVolumeExpansion` in the manifest. Select `No`. |
+| Reclaiming Policy | Specified by `reclaimPolicy` in the manifest. The value is `Delete` by default. |
+| Supported Access Mode | Specified by `.metadata.annotations.storageclass.kubesphere.io/supported-access-modes` in the manifest. `ReadWriteOnce`, `ReadOnlyMany` and `ReadWriteMany` all are selected by default. |
+
+#### Parameters
+
+| Key| Description | Value |
+| :---- | :---- | :----|
+| archiveOnDelete | Archive pvc when deleting | `true` |
+
+## Manage Volumes
+
+Once the storage class is created, you can create volumes with it. You can list, create, update and delete volumes in **Volumes** under **Storage** on the KubeSphere console. For more details, please see [Volume Management](../../project-user-guide/storage/volumes/).
diff --git a/content/en/docs/cluster-administration/platform-settings/customize-basic-information.md b/content/en/docs/cluster-administration/platform-settings/customize-basic-information.md
new file mode 100644
index 000000000..9cc7f5315
--- /dev/null
+++ b/content/en/docs/cluster-administration/platform-settings/customize-basic-information.md
@@ -0,0 +1,10 @@
+---
+title: "Customizing Platform Information"
+keywords: 'KubeSphere, Kubernetes, customize, platform'
+description: 'How to Customize KubeSphere Platform Information.'
+
+linkTitle: "Customizing Platform Information"
+weight: 4270
+---
+
+TBD
\ No newline at end of file
diff --git a/content/en/docs/cluster-administration/Shuting Down and Restart Cluster cracefully.md b/content/en/docs/cluster-administration/shuting-down-and-restart-cluster-cracefully.md
similarity index 100%
rename from content/en/docs/cluster-administration/Shuting Down and Restart Cluster cracefully.md
rename to content/en/docs/cluster-administration/shuting-down-and-restart-cluster-cracefully.md
diff --git a/content/en/docs/cluster-administration/storageclass.md b/content/en/docs/cluster-administration/storageclass.md
deleted file mode 100644
index db100ea30..000000000
--- a/content/en/docs/cluster-administration/storageclass.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: "StorageClass"
-keywords: "kubernetes, docker, kubesphere, jenkins, istio, prometheus"
-description: "Kubernetes and KubeSphere node management"
-
-linkTitle: "StorageClass"
-weight: 100
----
diff --git a/content/en/docs/devops-user-guide/_index.md b/content/en/docs/devops-user-guide/_index.md
index ae6a0e36c..b5166f340 100644
--- a/content/en/docs/devops-user-guide/_index.md
+++ b/content/en/docs/devops-user-guide/_index.md
@@ -15,38 +15,38 @@ As you install the DevOps component, Jenkins is automatically deployed. KubeSphe
## Using DevOps
-[DevOps Project Management](../devops-user-guide/how-to-use/devops-project-management/)
+### [DevOps Project Management](../devops-user-guide/how-to-use/devops-project-management/)
Create and manage DevOps projects, as well as roles and members in them.
-[Create a Pipeline Using Jenkinsfile](../devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile/)
+### [Create a Pipeline Using Jenkinsfile](../devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile/)
Learn how to create and run a pipeline by using an example Jenkinsfile.
-[Create a Pipeline Using Graphical Editing Panel](../devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel/)
+### [Create a Pipeline Using Graphical Editing Panel](../devops-user-guide/how-to-use/create-a-pipeline-using-graphical-editing-panel/)
Learn how to create and run a pipeline by using the graphical editing panel of KubeSphere.
-[Choose Jenkins Agent](../devops-user-guide/how-to-use/choose-jenkins-agent/)
+### [Choose Jenkins Agent](../devops-user-guide/how-to-use/choose-jenkins-agent/)
Specify the Jenkins agent and use the built-in podTemplate for your pipeline.
-[Credential Management](../devops-user-guide/how-to-use/credential-management/)
+### [Credential Management](../devops-user-guide/how-to-use/credential-management/)
Create credentials so that your pipelines can communicate with third-party applications or websites.
-[Set CI Node for Dependency Cathe](../devops-user-guide/how-to-use/set-ci-node/)
+### [Set CI Node for Dependency Cache](../devops-user-guide/how-to-use/set-ci-node/)
Configure a node or a group of nodes specifically for continuous integration (CI) to speed up the building process in a pipeline.
-[Set Email Server for KubeSphere Pipelines](../devops-user-guide/how-to-use/jenkins-email/)
+### [Set Email Server for KubeSphere Pipelines](../devops-user-guide/how-to-use/jenkins-email/)
Set the email server to receive notifications of your Jenkins pipelines.
-[Jenkins System Settings](../devops-user-guide/how-to-use/jenkins-setting/)
+### [Jenkins System Settings](../devops-user-guide/how-to-use/jenkins-setting/)
Learn how to customize your Jenkins settings.
## Tool Integration
-[Integrate SonarQube into Pipeline](../devops-user-guide/how-to-integrate/sonarqube/)
\ No newline at end of file
+### [Integrate SonarQube into Pipeline](../devops-user-guide/how-to-integrate/sonarqube/)
\ No newline at end of file
diff --git a/content/en/docs/devops-user-guide/examples/a-maven-project.md b/content/en/docs/devops-user-guide/examples/a-maven-project.md
new file mode 100644
index 000000000..75313f760
--- /dev/null
+++ b/content/en/docs/devops-user-guide/examples/a-maven-project.md
@@ -0,0 +1,177 @@
+---
+title: "How to build and deploy a maven project"
+keywords: 'kubernetes, docker, devops, jenkins, maven'
+description: ''
+linkTitle: "Build And Deploy A Maven Project"
+weight: 200
+---
+
+## Prerequisites
+
+- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
+- You need to create [DockerHub](http://www.dockerhub.com/) account.
+- You need to create a workspace, a DevOps project, and a user account, and this account needs to be invited into the DevOps project as the role of maintainer.
+
+## Workflow for Maven Project
+
+
+
+As shown in the graph above, this is the workflow for a Maven project in KubeSphere DevOps.
+
+It uses the pipeline of Jenkins to build and deploy the maven project in KubeSphere DevOps. All steps are defined in the pipeline.
+
+When running, Jenkins Master creates a pod to run the pipeline. Kubernetes creates the pod as the agent of Jenkins Master, and the pod will be destroyed after the pipeline finishes. The main process is to clone the code, build and push the image, and deploy the workload.
+
+## Default Configurations in Jenkins
+
+### Maven Version
+
+Execute the following command in the maven builder container to get version info.
+
+```bash
+mvn --version
+
+Apache Maven 3.5.3 (3383c37e1f9e9b3bc3df5050c29c8aff9f295297; 2018-02-24T19:49:05Z)
+Maven home: /opt/apache-maven-3.5.3
+Java version: 1.8.0_232, vendor: Oracle Corporation
+Java home: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.232.b09-0.el7_7.i386/jre
+Default locale: en_US, platform encoding: UTF-8
+```
+
+### Maven Cache
+
+Jenkins Agent mounts the directories by Docker Volume on the node. So, the pipeline can cache some special directories, such as `/root/.m2`, which is used for the Maven building.
+
+`/root/.m2` is the default cache directory for Maven tools in KubeSphere DevOps. The dependency packages are downloaded and cached, and there won't be network requests if they are used next time.
+
+### Global Maven Setting in Jenkins Agent
+
+The default maven settings file path is maven and the configuration file path is `/opt/apache-maven-3.5.3/conf/settings.xml` .
+
+Execute the following command to get the content of the Maven settings.
+
+```bash
+kubectl get cm -n kubesphere-devops-system ks-devops-agent -o yaml
+```
+
+### Network of Maven Pod
+
+The Pod labeled maven uses the docker-in-docker network to run the pipeline. That is, the `/var/run/docker.sock` in the node is mounted into the maven container.
+
+## An example of a maven pipeline
+
+### Prepare for the Maven Project
+
+- Ensure that the Maven project builds successfully on the development device.
+- add the Dockerfile file into the project repo for building the image, refer to https://github.com/kubesphere/devops-java-sample/blob/master/Dockerfile-online
+- add the yaml file into the project repo for deploying the workload, refer to https://github.com/kubesphere/devops-java-sample/tree/master/deploy/dev-ol. If there are different environments, you need to prepare multiple deployment files.
+
+### Create the Credentials
+
+- dockerhub-id. An *Account Credentials* for the registry, e.g. DockerHub.
+- demo-kubeconfig. A *Kubeconfig Credential* for deploying workloads.
+
+For details, please refer to the [Credentials Management](../../how-to-use/credential-management/).
+
+
+
+### Create the Project for Workloads
+
+In this demo, all workloads are deployed under `kubesphere-sample-dev`. So, you need to create the namespace `kubesphere-sample-dev` in advance.
+
+
+
+### Create the Pipeline for the Maven Project
+
+At First, create a *DevOps Project* and a *Pipeline* refer to [Create a Pipeline - using Graphical Editing Panel](../../how-to-use/create-a-pipeline-using-graphical-editing-panel) .
+
+Secondly, click *Edit Jenkinsfile* button under your pipeline.
+
+
+
+Paste the following text into the pop-up window and save it.
+
+```groovy
+pipeline {
+ agent {
+ node {
+ label 'maven'
+ }
+ }
+
+ parameters {
+ string(name:'TAG_NAME',defaultValue: '',description:'')
+ }
+
+ environment {
+ DOCKER_CREDENTIAL_ID = 'dockerhub-id'
+ KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
+ REGISTRY = 'docker.io'
+ // need to replace by yourself dockerhub namespace
+ DOCKERHUB_NAMESPACE = 'shaowenchen'
+ APP_NAME = 'devops-java-sample'
+ BRANCH_NAME = 'dev'
+ }
+
+ stages {
+ stage ('checkout scm') {
+ steps {
+ git branch: 'master', url: "https://github.com/kubesphere/devops-java-sample.git"
+ }
+ }
+
+ stage ('unit test') {
+ steps {
+ container ('maven') {
+ sh 'mvn clean -o -gs `pwd`/configuration/settings.xml test'
+ }
+ }
+ }
+
+ stage ('build & push') {
+ steps {
+ container ('maven') {
+ sh 'mvn -o -Dmaven.test.skip=true -gs `pwd`/configuration/settings.xml clean package'
+ sh 'docker build -f Dockerfile-online -t $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER .'
+ withCredentials([usernamePassword(passwordVariable : 'DOCKER_PASSWORD' ,usernameVariable : 'DOCKER_USERNAME' ,credentialsId : "$DOCKER_CREDENTIAL_ID" ,)]) {
+ sh 'echo "$DOCKER_PASSWORD" | docker login $REGISTRY -u "$DOCKER_USERNAME" --password-stdin'
+ sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/$APP_NAME:SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER'
+ }
+ }
+ }
+ }
+
+ stage('deploy to dev') {
+ steps {
+ kubernetesDeploy(configs: 'deploy/dev-ol/**', enableConfigSubstitution: true, kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
+ }
+ }
+ }
+}
+```
+
+After saving, you will get this.
+
+
+
+### Run and test
+
+Click `run` and type `TAG_NAME` to run the pipeline.
+
+
+
+After the run is complete, you can see the following figure.
+
+
+
+Under the project of `kubesphere-sample-dev`, there are new workloads created.
+
+
+
+You can view the access address of the service through service.
+
+
+
+## Summary
+
+This document is not a getting started document. It introduces some configurations for building Maven projects on the KubeSphere DevOps platform. At the same time, an example flow of a Maven project is provided. In your case, you are free to add new steps to improve the pipeline.
diff --git a/content/en/docs/devops-user-guide/examples/go-project-pipeline.md b/content/en/docs/devops-user-guide/examples/go-project-pipeline.md
new file mode 100644
index 000000000..3438f205d
--- /dev/null
+++ b/content/en/docs/devops-user-guide/examples/go-project-pipeline.md
@@ -0,0 +1,160 @@
+---
+title: "Build and Deploy a Go Project"
+keywords: 'Kubernetes, docker, devops, jenkins, go, KubeSphere'
+description: 'This tutorial demonstrates how to build and deploy a Go project.'
+linkTitle: "Build and Deploy a Go Project"
+weight: 200
+---
+
+## Prerequisites
+
+- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/).
+- You need to have a [Docker Hub](https://hub.docker.com/) account.
+- You need to create a workspace, a DevOps project, a project, and an account (`project-regular`). This account needs to be invited to the DevOps project and the project with the role `operator`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project).
+
+## Create Docker Hub Access Token
+
+1. Sign in [Docker Hub](https://hub.docker.com/) and select **Account Settings** from the menu in the top right corner.
+
+ 
+
+2. Click **Security** and **New Access Token**.
+
+ 
+
+3. Enter the token name and click **Create**.
+
+ 
+
+4. Click **Copy and Close** and remember to save the access token.
+
+ 
+
+## Create Credentials
+
+You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for image pushing. Besides, you also need to create kubeconfig credentials for the access to the Kubernetes cluster.
+
+1. Log in the web console of KubeSphere as `project-regular`. Go to your DevOps project and click **Create** in **Credentials**.
+
+ 
+
+2. In the dialogue that appears, set a **Credential ID**, which will be used later in the Jenkinsfile, and select **Account Credentials** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Token/Password**. When you finish, click **OK**.
+
+ 
+
+{{< notice tip >}}
+
+For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/).
+
+{{</ notice >}}
+
+3. Click **Create** again and select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current user account. Set a **Credential ID** and click **OK**.
+
+ 
+
+## Create a Pipeline
+
+With the above credentials ready, you can create a pipeline using an example Jenkinsfile as below.
+
+1. To create a pipeline, click **Create** on the **Pipelines** page.
+
+ 
+
+2. Set a name in the pop-up window and click **Next** directly.
+
+ 
+
+3. In this tutorial, you can use default values for all the fields. In **Advanced Settings**, click **Create** directly.
+
+ 
+
+## Edit Jenkinsfile
+
+1. In the pipeline list, click this pipeline to go to its detail page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it.
+
+ 
+
+2. Copy and paste all the content below to the pop-up window as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, and `PROJECT_NAME` with yours. When you finish, click **OK**.
+
+ ```groovy
+ pipeline {
+ agent {
+ node {
+ label 'maven'
+ }
+ }
+
+ environment {
+ // the address of your harbor registry
+ REGISTRY = 'docker.io'
+ // your docker hub username
+ DOCKERHUB_USERNAME = 'yuswift'
+ // docker image name
+ APP_NAME = 'devops-go-sample'
+ // ‘dockerhubid’ is the credential id you created in KubeSphere for docker access token
+ DOCKERHUB_CREDENTIAL = credentials('dockerhubid')
+ //the kubeconfig credential id you created in KubeSphere
+ KUBECONFIG_CREDENTIAL_ID = 'go'
+ // the name of the project you created in KubeSphere, not the DevOps project name
+ PROJECT_NAME = 'devops-go'
+ }
+
+ stages {
+ stage('docker login') {
+ steps{
+ container ('maven') {
+ sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin'
+ }
+ }
+ }
+
+ stage('build & push') {
+ steps {
+ container ('maven') {
+ sh 'git clone https://github.com/yuswift/devops-go-sample.git'
+ sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .'
+ sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME'
+ }
+ }
+ }
+ stage ('deploy app') {
+ steps {
+ container('maven') {
+ kubernetesDeploy(configs: 'devops-go-sample/manifest/deploy.yaml', kubeconfigId: "$KUBECONFIG_CREDENTIAL_ID")
+ }
+ }
+ }
+ }
+ }
+ ```
+
+{{< notice note >}}
+
+If your pipeline runs successfully, images will be pushed to Docker Hub. If you are using Harbor, you cannot pass the parameter to `docker login -u` via the Jenkins credential with environment variables. This is because every Harbor robot account username contains a `$` character, which will be converted to `$$` by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/).
+
+{{</ notice >}}
+
+## Run Pipeline
+
+1. After you finish the Jenkinsfile, you can see graphical panels display on the dashboard. Click **Run** to run the pipeline.
+
+ 
+
+2. In **Activity**, you can see the status of the pipeline. It may take a while before it successfully runs.
+
+ 
+
+
+## Verify Results
+
+1. A **Deployment** will be created in the project specified in the Jenkinsfile if the pipeline runs successfully.
+
+ 
+
+2. Check whether the image is pushed to Docker Hub as shown below:
+
+ 
+
+ 
+
+
\ No newline at end of file
diff --git a/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md b/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md
new file mode 100644
index 000000000..f274c37ec
--- /dev/null
+++ b/content/en/docs/devops-user-guide/examples/multi-cluster-project-example.md
@@ -0,0 +1,155 @@
+---
+title: "Deploy Apps in a Multi-cluster Project Using Jenkinsfile"
+keywords: 'Kubernetes, KubeSphere, docker, devops, jenkins, multi-cluster'
+description: 'This tutorial demonstrates how to deploy apps in a multi-cluster project using a Jenkinsfile.'
+linkTitle: "Deploy Apps in a Multi-cluster Project Using Jenkinsfile"
+weight: 300
+---
+
+## Prerequisites
+
+- You need to [enable the multi-cluster feature](../../../../docs/multicluster-management/).
+- You need to have a [Docker Hub](https://hub.docker.com/) account.
+- You need to [enable KubeSphere DevOps System](../../../../docs/pluggable-components/devops/) on your host cluster.
+- You need to create a workspace with multiple clusters, a DevOps project on your **host** cluster, a multi-cluster project (in this tutorial, this multi-cluster project is created on the host cluster and one member cluster), and an account (`project-regular`). This account needs to be invited to the DevOps project and the multi-cluster project with the role `operator`. For more information, see [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project), [Multi-cluster Management](../../../multicluster-management) and [Multi-cluster Projects](../../../project-administration/project-and-multicluster-project/#multi-cluster-projects).
+
+## Create Docker Hub Access Token
+
+1. Sign in [Docker Hub](https://hub.docker.com/) and select **Account Settings** from the menu in the top right corner.
+
+ 
+
+2. Click **Security** and **New Access Token**.
+
+ 
+
+3. Enter the token name and click **Create**.
+
+ 
+
+4. Click **Copy and Close** and remember to save the access token.
+
+ 
+
+## Create Credentials
+
+You need to create credentials in KubeSphere for the access token created so that the pipeline can interact with Docker Hub for image pushing. Besides, you also need to create kubeconfig credentials for access to the Kubernetes cluster.
+
+1. Log in the web console of KubeSphere as `project-regular`. Go to your DevOps project and click **Create** in **Credentials**.
+
+ 
+
+2. In the dialogue that appears, set a **Credential ID**, which will be used later in the Jenkinsfile, and select **Account Credentials** for **Type**. Enter your Docker Hub account name for **Username** and the access token just created for **Token/Password**. When you finish, click **OK**.
+
+ 
+
+ {{< notice tip >}}
+
+ For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/).
+
+   {{</ notice >}}
+
+3. Click **Create** again and select **kubeconfig** for **Type**. Note that KubeSphere automatically populates the **Content** field, which is the kubeconfig of the current user account. Set a **Credential ID** and click **OK**.
+
+ 
+
+## Create a Pipeline
+
+With the above credentials ready, you can create a pipeline using an example Jenkinsfile as below.
+
+1. To create a pipeline, click **Create** on the **Pipelines** page.
+
+ 
+
+2. Set a name in the pop-up window and click **Next** directly.
+
+ 
+
+3. In this tutorial, you can use default values for all the fields. In **Advanced Settings**, click **Create** directly.
+
+ 
+
+## Edit Jenkinsfile
+
+1. In the pipeline list, click this pipeline to go to its detail page. Click **Edit Jenkinsfile** to define a Jenkinsfile and your pipeline runs based on it.
+
+ 
+
+2. Copy and paste all the content below to the pop-up window as an example Jenkinsfile for your pipeline. You must replace the value of `DOCKERHUB_USERNAME`, `DOCKERHUB_CREDENTIAL`, `KUBECONFIG_CREDENTIAL_ID`, `MULTI_CLUSTER_PROJECT_NAME`, and `MEMBER_CLUSTER_NAME` with yours. When you finish, click **OK**.
+
+ ```
+ pipeline {
+ agent {
+ node {
+ label 'maven'
+ }
+
+ }
+
+ environment {
+ REGISTRY = 'docker.io'
+ // username of dockerhub
+ DOCKERHUB_USERNAME = 'yuswift'
+ APP_NAME = 'devops-go-sample'
+ // ‘dockerhubid’ is the dockerhub credential id you created on ks console
+ DOCKERHUB_CREDENTIAL = credentials('dockerhubid')
+ // the kubeconfig credential id you created on ks console
+ KUBECONFIG_CREDENTIAL_ID = 'multi-cluster'
+ // mutli-cluster project name under your own workspace
+ MULTI_CLUSTER_PROJECT_NAME = 'devops-with-go'
+ // the member cluster name you want to deploy app on
+ // in this tutorial, you are assumed to deploy app on host and only one member cluster
+ // for more member clusters, please edit manifest/multi-cluster-deploy.yaml
+ MEMBER_CLUSTER_NAME = 'c9'
+ }
+
+ stages {
+ stage('docker login') {
+ steps {
+ container('maven') {
+ sh 'echo $DOCKERHUB_CREDENTIAL_PSW | docker login -u $DOCKERHUB_CREDENTIAL_USR --password-stdin'
+ }
+
+ }
+ }
+
+ stage('build & push') {
+ steps {
+ container('maven') {
+ sh 'git clone https://github.com/yuswift/devops-go-sample.git'
+ sh 'cd devops-go-sample && docker build -t $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME .'
+ sh 'docker push $REGISTRY/$DOCKERHUB_USERNAME/$APP_NAME'
+ }
+ }
+ }
+
+ stage('deploy app to multi cluster') {
+ steps {
+ container('maven') {
+ script {
+ withCredentials([
+ kubeconfigFile(
+ credentialsId: 'multi-cluster',
+ variable: 'KUBECONFIG')
+ ]) {
+ sh 'envsubst < devops-go-sample/manifest/multi-cluster-deploy.yaml | kubectl apply -f -'
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ ```
+
+ {{< notice note >}}
+
+ If your pipeline runs successfully, images will be pushed to Docker Hub. If you are using Harbor, you cannot pass the parameter to `docker login -u` via the Jenkins credential with environment variables. This is because every Harbor robot account username contains a `$` character, which will be converted to `$$` by Jenkins when used by environment variables. [Learn more](https://number1.co.za/rancher-cannot-use-harbor-robot-account-imagepullbackoff-pull-access-denied/).
+
+   {{</ notice >}}
+
+## Run Pipeline
+
+After you save the Jenkinsfile, click **Run**. If everything goes well, you will see the Deployment workload in your multi-cluster project.
+
+
\ No newline at end of file
diff --git a/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md b/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md
index 0890e13c6..d3c74838c 100644
--- a/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md
+++ b/content/en/docs/devops-user-guide/how-to-integrate/sonarqube.md
@@ -225,7 +225,7 @@ kubectl -n kubesphere-system rollout restart deploy ks-apiserver
kubectl -n kubesphere-system rollout restart deploy ks-console
```
-## Create SonarQube Token for New Projetct
+## Create SonarQube Token for New Project
You need a SonarQube token so that your pipeline can communicate with SonarQube as it runs.
diff --git a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md
index cf637042a..b97f460a2 100644
--- a/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md
+++ b/content/en/docs/devops-user-guide/how-to-use/create-a-pipeline-using-jenkinsfile.md
@@ -1,217 +1,231 @@
---
-title: "Create a pipeline using jenkinsfile"
-keywords: 'kubesphere, kubernetes, docker, spring boot, jenkins, devops, ci/cd, pipeline'
-description: "Create a pipeline using jenkinsfile"
-linkTitle: "Create a pipeline using jenkinsfile"
+title: "Create a Pipeline Using a Jenkinsfile"
+keywords: 'KubeSphere, Kubernetes, docker, spring boot, Jenkins, devops, ci/cd, pipeline'
+description: "How to create a pipeline using a Jenkinsfile."
+linkTitle: "Create a Pipeline Using a Jenkinsfile"
weight: 200
---
-## Objective
+A Jenkinsfile is a text file that contains the definition of a Jenkins pipeline and is checked into source control. As it stores the entire workflow as code, it underpins the code review and iteration process of a pipeline. For more information, see [the official documentation of Jenkins](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/).
-In this tutorial, we will show you how to create a pipeline based on the Jenkinsfile from a GitHub repository. Using the pipeline, we will deploy a demo application to a development environment and a production environment respectively. Meanwhile, we will demo a branch that is used to test dependency caching capability. In this demo, it takes a relatively long time to finish the pipeline for the first time. However, it runs very faster since then. It proves the cache works well since this branch pulls lots of dependency from internet initially.
+This tutorial demonstrates how to create a pipeline based on a Jenkinsfile from a GitHub repository. Using the pipeline, you deploy an example application to a development environment and a production environment respectively, which is accessible externally.
-> Note:
-> KubeSphere supports two kinds of pipeline, i.e., Jenkinsfile in SCM which is introduced in this document and [Create a Pipeline - using Graphical Editing Panel](../create-a-pipeline-using-graphical-editing-panel). Jenkinsfile in SCM requires an internal Jenkinsfile in Source Control Management (SCM). In another word, Jenkfinsfile serves as a part of SCM. KubeSphere DevOps system will automatically build a CI/CD pipeline depending on existing Jenkinsfile of the code repository. You can define workflow like Stage, Step and Job in the pipeline.
+{{< notice note >}}
+
+Two types of pipelines can be created in KubeSphere: Pipelines created based on a Jenkinsfile in SCM, which is introduced in this tutorial, and [pipelines created through the graphical editing panel](../create-a-pipeline-using-graphical-editing-panel). The Jenkinsfile in SCM requires an internal Jenkinsfile in Source Control Management (SCM). In other words, the Jenkinsfile serves as part of SCM. The KubeSphere DevOps system automatically builds a CI/CD pipeline based on the existing Jenkinsfile of the code repository. You can define workflows such as `stage` and `step`.
+
+{{</ notice >}}
## Prerequisites
-- You need to have a DokcerHub account and a GitHub account.
-- You need to create a workspace, a DevOps project, and a **project-regular** user account, and this account needs to be invited into a DevOps project.
-- Set CI dedicated node for building pipeline, please refer to [Set CI Node for Dependency Cache](../../how-to-use/set-ci-node/).
-- You need to install and configure sonarqube, please refer to [How to integrate SonarQube in Pipeline
-](../../../how-to-integrate/sonarqube/) . Or you can skip this part, There is no **Sonarqube Analysis** below.
+- You need to have a [Docker Hub](https://hub.docker.com/) account and a [GitHub](https://github.com/) account.
+- You need to [enable KubeSphere DevOps system](../../../pluggable-components/devops/).
+- You need to create a workspace, a DevOps project, and an account (`project-regular`). This account needs to be invited to the DevOps project with the `operator` role. See [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project/) if they are not ready.
+- You need to set a CI dedicated node for running pipelines. Refer to [Set CI Node for Dependency Cache](../../how-to-use/set-ci-node/).
+- You need to install and configure SonarQube. Refer to [Integrate SonarQube into Pipeline](../../../devops-user-guide/how-to-integrate/sonarqube/). If you skip this part, there is no **SonarQube Analysis** below.
## Pipeline Overview
-There are eight stages as shown below in the pipeline that is going to demonstrate.
+There are eight stages as shown below in this example pipeline.

-> Note:
+{{< notice note >}}
-> - **Stage 1. Checkout SCM**: Checkout source code from GitHub repository.
-> - **Stage 2. Unit test**: It will continue to execute next stage after unit test passed.
-> - **Stage 3. SonarQube analysis**:Process sonarQube code quality analysis.
-> - **Stage 4.** **Build & push snapshot image**: Build the image based on selected branches in the behavioral strategy. Push the tag of `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` to DockerHub, among which, the `$BUILD_NUMBER` is the operation serial number in the pipeline's activity list.
-> - **Stage 5. Push the latest image**: Tag the sonarqube branch as latest and push it to DockerHub.
-> - **Stage 6. Deploy to dev**: Deploy sonarqube branch to Dev environment. verification is needed for this stage.
-> - **Stage 7. Push with tag**: Generate tag and released to GitHub. Then push the tag to DockerHub.
-> - **Stage 8. Deploy to production**: Deploy the released tag to the Production environment.
+- **Stage 1. Checkout SCM**: Check out source code from the GitHub repository.
+- **Stage 2. Unit test**: It will not proceed with the next stage until the test is passed.
+- **Stage 3. SonarQube analysis**: The SonarQube code quality analysis.
+- **Stage 4.** **Build & push snapshot image**: Build the image based on selected branches in **Behavioral strategy**. Push the tag of `SNAPSHOT-$BRANCH_NAME-$BUILD_NUMBER` to Docker Hub, the `$BUILD_NUMBER` of which is the operation serial number in the pipeline's activity list.
+- **Stage 5. Push the latest image**: Tag the sonarqube branch as `latest` and push it to Docker Hub.
+- **Stage 6. Deploy to dev**: Deploy the sonarqube branch to the development environment. Review is required for this stage.
+- **Stage 7. Push with tag**: Generate the tag and release it to GitHub. The tag is pushed to Docker Hub.
+- **Stage 8. Deploy to production**: Deploy the released tag to the production environment.
+
+{{</ notice >}}
## Hands-on Lab
### Step 1: Create Credentials
-> Note: If there are special characters in your account or password, please encode it using https://www.urlencoder.org/, then paste the encoded result into credentials below.
+1. Log in the KubeSphere console as `project-regular`. Go to your DevOps project and create the following credentials in **Credentials** under **Project Management**. For more information about how to create credentials, see [Credential Management](../../../devops-user-guide/how-to-use/credential-management/).
-1.1. Log in KubeSphere with the account `project-regular`, enter into the created DevOps project and create the following three credentials under **Project Management → Credentials**:
+{{< notice note >}}
+
+If there are any special characters such as `@` and `$` in your account or password, they can cause errors as a pipeline runs because they may not be recognized. In this case, you need to encode your account or password on some third-party websites first, such as [urlencoder](https://www.urlencoder.org/). After that, copy and paste the output for your credential information.
+
+{{</ notice >}}
|Credential ID| Type | Where to use |
| --- | --- | --- |
-| dockerhub-id | Account Credentials | DockerHub |
+| dockerhub-id | Account Credentials | Docker Hub |
| github-id | Account Credentials | GitHub |
| demo-kubeconfig | kubeconfig | Kubernetes |
-1.2. We need to create an additional credential `sonar-token` for SonarQube token, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Access SonarQube Console and Create Token](../../how-to-integrate/sonarqube/) to copy the token and paste here. Then press **OK** button.
+2. You need to create an additional credential ID (`sonar-token`) for SonarQube, which is used in stage 3 (SonarQube analysis) mentioned above. Refer to [Create SonarQube Token for New Project](../../../devops-user-guide/how-to-integrate/sonarqube/#create-sonarqube-token-for-new-project) to use the token for the **secret** field below. Click **OK** to finish.
-
+
-In total, we have created four credentials in this step.
+3. In total, you have four credentials in the list.
-
+
-### Step 2: Modify Jenkinsfile in Repository
+### Step 2: Modify Jenkinsfile in GitHub Repository
-#### Fork Project
+1. Log in GitHub. Fork [devops-java-sample](https://github.com/kubesphere/devops-java-sample) from the GitHub repository to your own GitHub account.
-Log in GitHub. Fork the [devops-java-sample](https://github.com/kubesphere/devops-java-sample) from GitHub repository to your own GitHub.
+
-
+2. In your own GitHub repository of **devops-java-sample**, click the file `Jenkinsfile-online` in the root directory.
-#### Edit Jenkinsfile
+
-2.1. After forking the repository to your own GitHub, open the file **Jenkinsfile-online** under root directory.
+3. Click the edit icon on the right to edit environment variables.
-
+
-2.2. Click the editing logo in GitHub UI to edit the values of environment variables.
-
-
-
-| Editing Items | Value | Description |
+| Items | Value | Description |
| :--- | :--- | :--- |
-| DOCKER\_CREDENTIAL\_ID | dockerhub-id | Fill in DockerHub's credential ID to log in your DockerHub. |
-| GITHUB\_CREDENTIAL\_ID | github-id | Fill in the GitHub credential ID to push the tag to GitHub repository. |
-| KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | kubeconfig credential ID is used to access to the running Kubernetes cluster. |
-| REGISTRY | docker.io | Set the web name of docker.io by default for pushing images. |
-| DOCKERHUB\_NAMESPACE | your-dockerhub-account | Replace it to your DockerHub's account name. (It can be the Organization name under the account.) |
-| GITHUB\_ACCOUNT | your-github-account | Change your GitHub account name, such as `https://github.com/kubesphere/`. Fill in `kubesphere` which can also be the account's Organization name. |
-| APP\_NAME | devops-java-sample | Application name |
-| SONAR\_CREDENTIAL\_ID | sonar-token | Fill in the SonarQube token credential ID for code quality test. |
+| DOCKER\_CREDENTIAL\_ID | dockerhub-id | The **Credential ID** you set in KubeSphere for your Docker Hub account. |
+| GITHUB\_CREDENTIAL\_ID | github-id | The **Credential ID** you set in KubeSphere for your GitHub account. It is used to push tags to your GitHub repository. |
+| KUBECONFIG\_CREDENTIAL\_ID | demo-kubeconfig | The **Credential ID** you set in KubeSphere for your kubeconfig. It is used to access a running Kubernetes cluster. |
+| REGISTRY | docker.io | It defaults to `docker.io`, serving as the address of pushing images. |
+| DOCKERHUB\_NAMESPACE | your-dockerhub-account | Replace it with your Docker Hub's account name. It can be the Organization name under the account. |
+| GITHUB\_ACCOUNT | your-github-account | Replace it with your GitHub account name. For example, your GitHub account name is `kubesphere` if your GitHub address is `https://github.com/kubesphere/`. It can also be the account's Organization name. |
+| APP\_NAME | devops-java-sample | The application name. |
+| SONAR\_CREDENTIAL\_ID | sonar-token | The **Credential ID** you set in KubeSphere for the SonarQube token. It is used for code quality test. |
-**Note: The command parameter `-o` of Jenkinsfile's `mvn` indicates that the offline mode is on. This tutorial has downloaded relevant dependencies to save time and to adapt to network interference in certain environments. The offline mode is on by default.**
+{{< notice note >}}
-2.3. After editing the environmental variables, click **Commit changes** at the top of GitHub page, then submit the updates to the sonarqube branch.
+The command parameter `-o` of Jenkinsfile's `mvn` indicates that the offline mode is enabled. Relevant dependencies have already been downloaded in this tutorial to save time and to adapt to network interference in certain environments. The offline mode is on by default.
+
+{{</ notice >}}
+
+4. After you edit the environmental variables, click **Commit changes** at the bottom of the page, which updates the file in the SonarQube branch.
+
+
### Step 3: Create Projects
-In this step, we will create two projects, i.e. `kubesphere-sample-dev` and `kubesphere-sample-prod`, which are development environment and production environment respectively.
+You need to create two projects, such as `kubesphere-sample-dev` and `kubesphere-sample-prod`, which represent the development environment and the production environment respectively. Related Deployments and Services of the app will be created automatically in these two projects once the pipeline runs successfully.
-#### Create The First Project
+{{< notice note >}}
-> Tip:The account `project-admin` should be created in advance since it is used as the reviewer of the CI/CD Pipeline.
+The account `project-admin` needs to be created in advance since it is the reviewer of the CI/CD Pipeline. See [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project/) for more information.
-3.1. Use the account `project-admin` to log in KubeSphere. Click **Create** button, then choose **Create a resource project**. Fill in basic information for the project. Click **Next** after complete.
+{{</ notice >}}
-- Name: `kubesphere-sample-dev`.
-- Alias: `development environment`.
+1. Use the account `project-admin` to log in KubeSphere. In the same workspace where you create the DevOps project, create two projects as below. Make sure you invite `project-regular` to these two projects with the role of `operator`.
+| Project Name | Alias |
+| ---------------------- | ----------------------- |
+| kubesphere-sample-dev | development environment |
+| kubesphere-sample-prod | production environment |
-3.2. Leave the default values at Advanced Settings. Click **Create**.
+2. Check the project list. You have two projects and one DevOps project as below:
-3.3. Now invite `project-regular` user into `kubesphere-sample-dev`. Choose **Project Settings → Project Members**. Click **Invite Member** to invite `project-regular` and grant this account the role of `operator`.
-
-#### Create the Second Project
-
-Similarly, create a project named `kubesphere-sample-prod` following the steps above. This project is the production environment. Then invite `project-regular` to the project of `kubesphere-sample-prod`, and grant it the role of `operator` as well.
-
-> Note: When the CI/CD pipeline succeeded. You will see the demo application's Deployment and Service have been deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod.` respectively.
-
-
+
### Step 4: Create a Pipeline
-#### Fill in Basic Information
+1. Log out of KubeSphere and log back in as `project-regular`. Go to the DevOps project `demo-devops` and click **Create** to build a new pipeline.
-4.1. Switch the login user to `project-regular`. Enter into the DevOps project `demo-devops`. click **Create** to build a new pipeline.
+
-
+2. Provide the basic information in the dialogue that appears. Name it `jenkinsfile-in-scm` and select a code repository.
-4.2. Fill in the pipeline's basic information in the pop-up window, name it `jenkinsfile-in-scm`, click **Code Repository**.
+
-
+3. In the tab **GitHub**, click **Get Token** to generate a new GitHub token if you do not have one. Paste the token to the box and click **Confirm**.
-#### Add Repository
+
-4.3. Click **Get Token** to generate a new GitHub token if you do not have one. Then paste the token to the edit box.
+
-
+4. Choose your GitHub account. All the repositories related to this token will be listed on the right. Select **devops-java-sample** and click **Select this repo**. Click **Next** to continue.
-
+
-4.4. Click **Confirm**, choose your account. All the code repositories related to this token will be listed on the right. Select **devops-java-sample** and click **Select this repo**, then click **Next**.
+5. In **Advanced Settings**, check the box next to **Discard old branch**. In this tutorial, you can use the default value of **Days to keep old branches** and **Maximum number of branches to keep**.
-
+
-#### Advanced Settings
+Discarding old branches means that you will discard the branch record altogether. The branch record includes console output, archived artifacts and other relevant metadata of specific branches. Fewer branches mean that you can save the disk space that Jenkins is using. KubeSphere provides two options to determine when old branches are discarded:
-Now we are on the advanced setting page.
+- **Days to keep old branches**. Branches will be discarded after a certain number of days.
+- **Maximum number of branches to keep**. The oldest branches will be discarded after branches reach a certain amount.
-
+{{< notice note >}}
-4.5. In the behavioral strategy, KubeSphere pipeline has set three strategies by default. Since this demo has not applied the strategy of **Discover PR from Forks,**, this strategy can be deleted.
+**Days to keep old branches** and **Maximum number of branches to keep** apply to branches at the same time. As long as a branch meets the condition of either field, it will be discarded. For example, if you specify 2 as the number of retention days and 3 as the maximum number of branches, any branches that exceed either number will be discarded. KubeSphere prepopulates these two fields with -1 by default, which means deleted branches will be discarded.
-
+{{</ notice >}}
-
+6. In **Behavioral strategy**, KubeSphere offers three strategies by default. You can delete **Discover PR from Forks** as this strategy will not be used in this example. You do not need to change the setting and can use the default value directly.
-4.6. The path is **Jenkinsfile** by default. Please change it to `Jenkinsfile-online`, which is the file name of Jenkinsfile in the repository located in root directory.
+
-> Note: Script path is the Jenkinsfile path in the code repository. It indicates the repository's root directory. If the file location changes, the script path should also be changed.
+As a Jenkins pipeline runs, the Pull Request (PR) submitted by developers will also be regarded as a separate branch.
-
+**Discover Branches**
-4.7. **Scan Repo Trigger** can be customized according to the team's development preference. We set it to `5 minutes`. Click **Create** when complete advanced settings.
+- **Exclude branches that are also filed as PRs**. Source branches that are also filed as PRs, such as the origin's master branch, are not scanned. These branches need to be merged.
+- **Only branches that are also filed as PRs**. Only scan the PR branch.
+- **All branches**. Pull all the branches from the repository origin.
-
+**Discover PR from Origin**
-
+- **Source code version of PR merged with target branch**. A pipeline is created and runs based on the source code after the PR is merged into the target branch.
+- **Source code version of PR itself**. A pipeline is created and runs based on the source code of the PR itself.
+- **Two pipelines are created when a PR is discovered**. KubeSphere creates two pipelines, one based on the source code after the PR is merged into the target branch, and the other based on the source code of the PR itself.
-#### Run the Pipeline
+7. Scroll down to **Script Path**. The field specifies the Jenkinsfile path in the code repository. It indicates the repository's root directory. If the file location changes, the script path also needs to be changed. Please change it to `Jenkinsfile-online`, which is the file name of Jenkinsfile in the example repository located in the root directory.
-Refresh browser manually or you may need to click `Scan Repository`, then you can find two activities triggered. Or you may want to trigger them manually as the following instructions.
+
-4.8. Click **Run** on the right. According to the **Behavioral Strategy**, it will load the branches that have Jenkinsfile. Set the value of branch as `sonarqube`. Since there is no default value in the Jenkinsfile file, put in a tag number in the **TAG_NAME** such as `v0.0.1`. Click **OK** to trigger a new activity.
+8. In **Scan Repo Trigger**, check **If not, scan regularly** and set the interval to **5 minutes**. Click **Create** to finish.
-> Note: TAG\_NAME is used to generate release and images with tag in GitHub and DockerHub. Please notice that `TAG_NAME` should not duplicate the existing `tag` name in the code repository. Otherwise the pipeline can not run.
+
-
+{{< notice note >}}
-At this point, the pipeline for the sonarqube branch is running.
+You can set a specific interval to allow pipelines to scan remote repositories, so that any code updates or new PRs can be detected based on the strategy you set in **Behavioral strategy**.
-> Note: Click **Branch** to switch to the branch list and review which branches are running. The branch here is determined by the **Behavioral Strategy.**
+{{</ notice >}}
-
+### Step 5: Run a Pipeline
-#### Review Pipeline
+1. After a pipeline is created, it displays in the list below. Click it to go to its detail page.
-When the pipeline runs to the step of `input`
-it will pause. You need to click **Continue** manually. Please note that there are three stages defined in the Jenkinsfile-online. Therefore, the pipeline will be reviewed three times in the three stages of `deploy to dev, push with tag, deploy to production`.
+
-
+2. Under **Activity**, three branches are being scanned. Click **Run** on the right and the pipeline runs based on the behavioral strategy you set. Select **sonarqube** from the drop-down list and add a tag number such as `v0.0.2`. Click **OK** to trigger a new activity.
-> Note: In real development or production scenario, it requires someone who has higher authority (e.g. release manager) to review the pipeline and the image, as well as the code analysis result. They have the authority to determine whether to approve push and deploy. In Jenkinsfile, the `input` step supports you to specify who to review the pipeline. If you want to specify a user `project-admin` to review, you can add a field in the Jenkinsfile. If there are multiple users, you need to use commas to separate them as follows:
+
+
+
+
+{{< notice note >}}
+
+- If you do not see any activity on this page, you need to refresh your browser manually or click **Scan Repository** from the drop-down menu (the **More** button).
+- The tag name is used to generate releases and images with the tag in GitHub and Docker Hub. An existing tag name cannot be used again for the field TAG_NAME. Otherwise, the pipeline will not run successfully.
+
+{{</ notice >}}
+
+3. Wait for a while and you can see some activities stop and some fail. Click the first one to view details.
+
+
+
+{{< notice note >}}
+
+Activity failures may be caused by different factors. In this example, only the Jenkinsfile of the branch sonarqube is changed as you edit the environment variables in it in the steps above. On the contrary, these variables in the dependency and master branch remain unchanged (namely, wrong GitHub and Docker Hub account), resulting in the failure. You can click it and inspect its logs to see details. Other reasons for failures may be network issues, incorrect coding in the Jenkinsfile and so on.
+
+{{</ notice >}}
+
+4. The pipeline pauses at the stage `deploy to dev`. You need to click **Proceed** manually. Note that the pipeline will be reviewed three times as `deploy to dev`, `push with tag`, and `deploy to production` are defined in the Jenkinsfile respectively.
+
+
+
+In a development or production environment, it requires someone who has higher authority (e.g. release manager) to review the pipeline, images, as well as the code analysis result. They have the authority to determine whether the pipeline can go to the next stage. In the Jenkinsfile, you use the section `input` to specify who reviews the pipeline. If you want to specify a user (e.g. `project-admin`) to review it, you can add a field in the Jenkinsfile. If there are multiple users, you need to use commas to separate them as follows:
```groovy
···
@@ -219,71 +233,83 @@ input(id: 'release-image-with-tag', message: 'release image with tag?', submitte
···
```
-### Step 5: Check Pipeline Status
+### Step 6: Check Pipeline Status
-5.1. Click into **Activity → sonarqube → Task Status**, you can see the pipeline running status. Please note that the pipeline will keep initializing for several minutes when the creation just completed. There are eight stages in the sample pipeline and they have been defined individually in [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online).
+1. In **Task Status**, you can see how a pipeline is running. Please note that the pipeline will keep initializing for several minutes after it is just created. There are eight stages in the sample pipeline and they have been defined separately in [Jenkinsfile-online](https://github.com/kubesphere/devops-java-sample/blob/sonarqube/Jenkinsfile-online).
-
+
-5.2. Check the pipeline running logs by clicking **Show Logs** at the top right corner. The page shows dynamic logs outputs, operating status and time etc.
+2. Check the pipeline running logs by clicking **Show Logs** in the top right corner. You can see the dynamic log output of the pipeline, including any errors that may stop the pipeline from running. For each stage, you click it to inspect logs, which can be downloaded to your local machine for further analysis.
-For each step, click specific stage on the left to inspect the logs. The logs can be downloaded to local for further analysis.
+
-
+### Step 7: Verify Results
-### Step 6: Verify Pipeline Running Results
+1. Once you successfully executed the pipeline, click **Code Quality** to check the results through SonarQube as follows.
-6.1. Once you successfully executed the pipeline, click `Code Quality` to check the results through SonarQube as the follows (reference only).
+
-
+
-6.2. The Docker image built by the pipeline has been successfully pushed to DockerHub, since we defined `push to DockerHub` stage in Jenkinsfile-online. In DockerHub you will find the image with tag v0.0.1 that we configured before running the pipeline, also you will find the images with tags`SNAPSHOT-sonarqube-6`(SNAPSHOT-branch-serial number) and `latest` have been pushed to DockerHub.
+2. The Docker image built through the pipeline has also been successfully pushed to Docker Hub, as it is defined in the Jenkinsfile. In Docker Hub, you will find the image with the tag `v0.0.2` that is specified before the pipeline runs.
-
+
-At the same time, a new tag and a new release have been generated in GitHub.
+3. At the same time, a new tag and a new release have been generated in GitHub.
-
+
-The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` as deployment and service.
+4. The sample application will be deployed to `kubesphere-sample-dev` and `kubesphere-sample-prod` with corresponding Deployments and Services created. Go to these two projects and here are the expected results:
| Environment | URL | Namespace | Deployment | Service |
| :--- | :--- | :--- | :--- | :--- |
-| Dev | `http://{NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev |
+| Development | `http://{NodeIP}:{$30861}` | kubesphere-sample-dev | ks-sample-dev | ks-sample-dev |
| Production | `http://{$NodeIP}:{$30961}` | kubesphere-sample-prod | ks-sample | ks-sample |
-6.3. Enter into these two projects, you can find the application's resources have been deployed to Kubernetes successully. For example, lets verify the Deployments and Services under project `kubesphere-sample-dev`:
-
#### Deployments
-
+
#### Services
-
+
-### Step 7: Visit Sample Service
+{{< notice note >}}
-7.1. You can switch to use `admin` account to open **web kubectl** from **Toolbox**. Enter into project `kubesphere-sample-dev`, select **Application Workloads → Services** and click into `ks-sample-dev` service.
+You may need to open the port in your security groups so that you can access the app with the URL.
-
+{{</ notice >}}
-7.2. Open **web kubectl** from **Toolbox**, try to access as the following:
+### Step 8: Access Sample Service
-> Note: curl Endpoints or {$Virtual IP}:{$Port} or {$Node IP}:{$NodePort}
+1. To access the service, log in KubeSphere as `admin` to use the **web kubectl** from **Toolbox**. Go to the project `kubesphere-sample-dev`, and select `ks-sample-dev` in **Services** under **Application Workloads**. The endpoint can be used to access the service.
+
+
+
+
+
+2. Use the **web kubectl** from **Toolbox** in the bottom right corner by executing the following command:
+
+```bash
+$ curl 10.10.128.169:8080
+```
+
+3. Expected output:
```bash
-$ curl 10.233.102.188:8080
Really appreciate your star, that's the power of our life.
```
-7.3. Similarly, you can test the service in project `kubesphere-sample-pro`
+{{< notice note >}}
-> Note: curl Endpoints or {$Virtual IP}:{$Port} or {$Node IP}:{$NodePort}
+Use `curl` endpoints or {$Virtual IP}:{$Port} or {$Node IP}:{$NodePort}
+
+{{</ notice >}}
+
+4. Similarly, you can test the service in the project `kubesphere-sample-prod` and you will see the same result.
```bash
-$ curl 10.233.102.188:8080
+$ curl 10.10.128.170:8080
Really appreciate your star, that's the power of our life.
```
-Configurations! You are familiar with KubeSphere DevOps pipeline, and you can continue to learn how to build CI/CD pipeline with a graphical panel and visualize your workflow in the next tutorial.
\ No newline at end of file
diff --git a/content/en/docs/installing-on-kubernetes/_index.md b/content/en/docs/installing-on-kubernetes/_index.md
index b0647fcf5..2f67c4282 100644
--- a/content/en/docs/installing-on-kubernetes/_index.md
+++ b/content/en/docs/installing-on-kubernetes/_index.md
@@ -39,9 +39,9 @@ Make sure your environment where existing Kubernetes clusters run meets the prer
## Installing on Hosted Kubernetes
-### [Deploy KubeSphere on Oracle OKE](../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/)
+### [Deploy KubeSphere on AKS](../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/)
-Learn how to deploy KubeSphere on Oracle Cloud Infrastructure Container Engine for Kubernetes.
+Learn how to deploy KubeSphere on Azure Kubernetes Service.
### [Deploy KubeSphere on AWS EKS](../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/)
@@ -55,14 +55,14 @@ Learn how to deploy KubeSphere on DigitalOcean.
Learn how to deploy KubeSphere on Google Kubernetes Engine.
-### [Deploy KubeSphere on AKS](../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/)
-
-Learn how to deploy KubeSphere on Azure Kubernetes Service.
-
### [Deploy KubeSphere on Huawei CCE](../installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce/)
Learn how to deploy KubeSphere on Huawei Cloud Container Engine.
+### [Deploy KubeSphere on Oracle OKE](../installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/)
+
+Learn how to deploy KubeSphere on Oracle Cloud Infrastructure Container Engine for Kubernetes.
+
## Installing on On-premises Kubernetes
### [Air-gapped Installation](../installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/)
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce.md
index 70b7efc30..64c030c97 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-huawei-cce.md
@@ -3,7 +3,7 @@ title: "Deploy KubeSphere on Huawei CCE"
keywords: "KubeSphere, Kubernetes, installation, huawei, cce"
description: "How to install KubeSphere on Huawei CCE."
-weight: 2275
+weight: 2270
---
This guide walks you through the steps of deploying KubeSphere on [Huaiwei CCE](https://support.huaweicloud.com/en-us/qs-cce/cce_qs_0001.html).
@@ -14,7 +14,7 @@ This guide walks you through the steps of deploying KubeSphere on [Huaiwei CCE](
First, create a Kubernetes cluster based on the requirements below.
-- KubeSphere 3.0.0 supports Kubernetes `1.15.x`, `1.16.x`, `1.17.x`, and `1.18.x` by default. Select a version and create the cluster, e.g. `v1.15.11` or `v1.17.9`.
+- KubeSphere 3.0.0 supports Kubernetes `1.15.x`, `1.16.x`, `1.17.x`, and `1.18.x`. Select a version and create the cluster, e.g. `v1.15.11` or `v1.17.9`.
- Ensure the cloud computing network for your Kubernetes cluster works, or use an elastic IP when you use “Auto Create” or “Select Existing”. You can also configure the network after the cluster is created. Refer to Configure [NAT Gateway](https://support.huaweicloud.com/en-us/productdesc-natgateway/en-us_topic_0086739762.html).
- Select `s3.xlarge.2` `4-core|8GB` for nodes and add more if necessary (3 and more nodes are required for a production environment).
@@ -23,7 +23,7 @@ First, create a Kubernetes cluster based on the requirements below.
- Go to `Resource Management` > `Cluster Management` > `Basic Information` > `Network`, and bind `Public apiserver`.
- Select `kubectl` on the right column, go to `Download kubectl configuration file`, and click `Click here to download`, then you will get a public key for kubectl.
-
+ 
After you get the configuration file for kubectl, use kubectl command lines to verify the connection to the cluster.
@@ -41,7 +41,7 @@ Server Version: version.Info{Major:"1", Minor:"17+", GitVersion:"v1.17.9-r0-CCE2
Huawei CCE built-in Everest CSI provides StorageClass `csi-disk` which uses SATA (normal I/O) by default, but the actual disk that is used for Kubernetes clusters is either SAS (high I/O) or SSD (extremely high I/O). Therefore, it is suggested that you create an extra StorageClass and set it as default. Refer to the official document - [Use kubectl to create a cloud storage](https://support.huaweicloud.com/en-us/usermanual-cce/cce_01_0044.html).
-{{ notice >}}
+{{ notice >}}
Below is an example to create a SAS (high I/O) for its corresponding StorageClass.
@@ -76,38 +76,36 @@ For how to set up or cancel a default StorageClass, refer to Kubernetes official
Use [ks-installer](https://github.com/kubesphere/ks-installer) to deploy KubeSphere on an existing Kubernetes cluster. Execute the following commands directly for a minimal installation:
```bash
-$ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
+kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```bash
-$ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
+kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
```
Go to `Workload` > `Pod`, and check the running status of the pod in `kubesphere-system` of its namespace to understand the minimal deployment of KubeSphere. Check `ks-console-xxxx` of the namespace to understand the availability of KubeSphere console.
-
+ 
### Expose KubeSphere Console
-Check the running status of Pod in `kubesphere-system` namespace and make sure the basic components of KubeSphere are running. Then expose KubeSphere console.
+Check the running status of Pods in `kubesphere-system` namespace and make sure the basic components of KubeSphere are running. Then expose KubeSphere console.
Go to `Resource Management` > `Network` and choose the service in `ks-console`. It is suggested that you choose `LoadBalancer` (Public IP is required). The configuration is shown below.
-
+ 
Default settings are OK for other detailed configurations. You can also set it based on your needs.
-
+ 
After you set LoadBalancer for KubeSphere console, you can visit it via the given address. Go to KubeSphere login page and use the default account (username `admin` and pw `P@88w0rd`) to log in.
-
+ 
## Enable Pluggable Components (Optional)
The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details.
-{{< notice note >}}
+{{< notice note >}}
Before you use Istio-based features of KubeSphere, you have to delete `applications.app.k8s.io` built in Huawei CCE due to the CRD conflict. You can run the command `kubectl delete crd applications.app.k8s.io` directly to delete it.
@@ -115,4 +113,4 @@ Before you use Istio-based features of KubeSphere, you have to delete `applicati
After your component is installed, go to the **Cluster Management** page, and you will see the interface below. You can check the status of your component in **Components**.
-
+ 
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md
index 56ef6b7dd..f1e8a649b 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks.md
@@ -3,7 +3,7 @@ title: "Deploy KubeSphere on AKS"
keywords: "KubeSphere, Kubernetes, Installation, Azure, AKS"
description: "How to deploy KubeSphere on AKS"
-weight: 2270
+weight: 2247
---
This guide walks you through the steps of deploying KubeSphere on [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/).
@@ -13,6 +13,7 @@ This guide walks you through the steps of deploying KubeSphere on [Azure Kuberne
Azure can help you implement infrastructure as code by providing resource deployment automation options. Commonly adopted tools include [ARM templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) and [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/what-is-azure-cli?view=azure-cli-latest). In this guide, we will use Azure CLI to create all the resources that are needed for the installation of KubeSphere.
### Use Azure Cloud Shell
+
You don't have to install Azure CLI on your machine as Azure provides a web-based terminal. Click the Cloud Shell button on the menu bar at the upper right corner in Azure portal.

@@ -20,25 +21,28 @@ You don't have to install Azure CLI on your machine as Azure provides a web-base
Select **Bash** Shell.

+
### Create a Resource Group
-An Azure resource group is a logical group in which Azure resources are deployed and managed. The following example creates a resource group named `KubeSphereRG` in the location `westus`.
+An Azure resource group is a logical group in which Azure resources are deployed and managed. The following example creates a resource group named `KubeSphereRG` in the location `westus`.
```bash
az group create --name KubeSphereRG --location westus
```
### Create an AKS Cluster
+
Use the command `az aks create` to create an AKS cluster. The following example creates a cluster named `KuberSphereCluster` with three nodes. This will take several minutes to complete.
```bash
az aks create --resource-group KubeSphereRG --name KuberSphereCluster --node-count 3 --enable-addons monitoring --generate-ssh-keys
```
+
{{< notice note >}}
You can use `--node-vm-size` or `-s` option to change the size of Kubernetes nodes. Default: Standard_DS2_v2 (2vCPU, 7GB memory). For more options, see [az aks create](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-create).
-{{ notice >}}
+{{ notice >}}
### Connect to the Cluster
@@ -53,12 +57,14 @@ kebesphere@Azure:~$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
aks-nodepool1-23754246-vmss000000 Ready agent 38m v1.16.13
```
+
### Check Azure Resources in the Portal
+
After you execute all the commands above, you can see there are 2 Resource Groups created in Azure Portal.

-Azure Kubernetes Services itself will be placed in KubeSphereRG.
+Azure Kubernetes Services itself will be placed in KubeSphereRG.

@@ -67,13 +73,15 @@ All the other Resources will be placed in MC_KubeSphereRG_KuberSphereCluster_wes

## Deploy KubeSphere on AKS
-To start deploying KubeSphere, use the following command.
+
+To start deploying KubeSphere, use the following commands.
+
```bash
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
-```bash
+
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
```
+
You can inspect the logs of installation through the following command:
```bash
@@ -83,10 +91,13 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
## Access KubeSphere Console
To access KubeSphere console from a public IP address, you need to change the service type to `LoadBalancer`.
+
```bash
kubectl edit service ks-console -n kubesphere-system
```
+
Find the following section and change the type to `LoadBalancer`.
+
```bash
spec:
clusterIP: 10.0.78.113
@@ -106,12 +117,15 @@ spec:
status:
loadBalancer: {}
```
+
After saving the configuration of ks-console service, you can use the following command to get the public IP address (under `EXTERNAL-IP`). Use the IP address to access the console with the default account and password (`admin/P@88w0rd`).
+
```bash
kebesphere@Azure:~$ kubectl get svc/ks-console -n kubesphere-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ks-console LoadBalancer 10.0.181.93 13.86.xxx.xxx 80:30194/TCP 13m 6379/TCP 10m
```
+
## Enable Pluggable Components (Optional)
-The example above demonstrates the process of a default minimal installation. For pluggable components, you can enable them either before or after the installation. See [Enable Pluggable Components](../../../pluggable-components/) for details.
\ No newline at end of file
+The example above demonstrates the process of a default minimal installation. For pluggable components, you can enable them either before or after the installation. See [Enable Pluggable Components](../../../pluggable-components/) for details.
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md
index 63e4134ba..f59e785b8 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do.md
@@ -8,7 +8,7 @@ weight: 2265

-This guide walks you through the steps of deploying KubeSphere on [ DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/).
+This guide walks you through the steps of deploying KubeSphere on [DigitalOcean Kubernetes](https://www.digitalocean.com/products/kubernetes/).
## Prepare a DOKS Cluster
@@ -17,6 +17,7 @@ A Kubernetes cluster in DO is a prerequisite for installing KubeSphere. Go to yo

You need to select:
+
1. Kubernetes version (e.g. *1.18.6-do.0*)
2. Datacenter region (e.g. *Frankfurt*)
3. VPC network (e.g. *default-fra1*)
@@ -25,13 +26,13 @@ You need to select:

-{{< notice note >}}
+{{< notice note >}}
- Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
- 2 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
-- The machine type Standard / 4 GB / 2 vCPUs is for minimal installation. If you plan to enable several pluggable components or use the cluster for production, you can upgrade your nodes to a more powerfull type (such as CPU-Optimized / 8 GB / 4 vCPUs). It seems that DigitalOcean provisions the master nodes based on the type of the worker nodes, and for Standard ones the API server can become unresponsive quite fast.
+- The machine type Standard / 4 GB / 2 vCPUs is for minimal installation. If you plan to enable several pluggable components or use the cluster for production, you can upgrade your nodes to a more powerful type (such as CPU-Optimized / 8 GB / 4 vCPUs). It seems that DigitalOcean provisions the master nodes based on the type of the worker nodes, and for Standard ones the API server can become unresponsive quite soon.
-{{ notice >}}
+{{ notice >}}
When the cluster is ready, you can download the config file for kubectl.
@@ -41,13 +42,11 @@ When the cluster is ready, you can download the config file for kubectl.
Now that the cluster is ready, you can install KubeSphere following the steps below:
-- Install KubeSphere using kubectl. The following command is only for the default minimal installation.
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
```bash
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
- ```
-
- ```bash
+
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
```
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md
index 7fe4f74e6..77561bef6 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks.md
@@ -7,19 +7,22 @@ weight: 2265
---
This guide walks you through the steps of deploying KubeSphere on [AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html).
+
## Install the AWS CLI
-Amazon EKS does not have a web terminal like GKE, so we must install the AWS CLI first. Below is an example for macOS and please refer to [Getting Started EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html) for other operating systems.
+
+First we need to install the AWS CLI. Below is an example for macOS and please refer to [Getting Started EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html) for other operating systems.
+
```shell
pip3 install awscli --upgrade --user
```
+
Check the installation with `aws --version`.

## Prepare an EKS Cluster
1. A standard Kubernetes cluster in AWS is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster.
-
-
+ 
2. On the **Configure cluster** page, fill in the following fields:

@@ -52,11 +55,11 @@ Check the installation with `aws --version`.
- Private: Enables only private access to your cluster's Kubernetes API server endpoint. Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint.
- {{< notice note >}}
+ {{< notice note >}}
- If you created a VPC without outbound internet access, then you must enable private access.
+ If you created a VPC without outbound internet access, then you must enable private access.
- {{ notice >}}
+ {{ notice >}}
- Public and private: Enables public and private access.
@@ -77,8 +80,8 @@ Check the installation with `aws --version`.

{{< notice note >}}
+
- Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
-- Ubuntu is used for the operating system here as an example. For more information on supported systems, see [Overview](../../../installing-on-kubernetes/introduction/overview/).
- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
- The machine type t3.medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources.
- For other settings, you can change them as well based on your own needs or use the default value.
@@ -88,83 +91,83 @@ Check the installation with `aws --version`.
8. When the EKS cluster is ready, you can connect to the cluster with kubectl.
## Configure kubectl
+
We will use the kubectl command-line utility for communicating with the cluster API server. First, get the kubeconfig of the EKS cluster created just now.
+
1. Configure your AWS CLI credentials.
-```shell
-$ aws configure
-AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE
-AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
-Default region name [None]: region-code
-Default output format [None]: json
-```
+ ```shell
+ $ aws configure
+ AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE
+ AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+ Default region name [None]: region-code
+ Default output format [None]: json
+ ```
+
2. Create your kubeconfig file with the AWS CLI.
-```shell
-aws eks --region us-west-2 update-kubeconfig --name cluster_name
-```
- - By default, the resulting configuration file is created at the default kubeconfig path (`.kube/config`) in your home directory or merged with an existing kubeconfig at that location. You can specify another path with the `--kubeconfig` option.
+ ```shell
+ aws eks --region us-west-2 update-kubeconfig --name cluster_name
+ ```
- - You can specify an IAM role ARN with the `--role-arn` option to use for authentication when you issue kubectl commands. Otherwise, the IAM entity in your default AWS CLI or SDK credential chain is used. You can view your default AWS CLI or SDK identity by running the `aws sts get-caller-identity` command.
+ - By default, the resulting configuration file is created at the default kubeconfig path (`.kube/config`) in your home directory or merged with an existing kubeconfig at that location. You can specify another path with the `--kubeconfig` option.
-For more information, see the help page with the `aws eks update-kubeconfig help` command or see [update-kubeconfig](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html) in the *AWS CLI Command Reference*.
+ - You can specify an IAM role ARN with the `--role-arn` option to use for authentication when you issue kubectl commands. Otherwise, the IAM entity in your default AWS CLI or SDK credential chain is used. You can view your default AWS CLI or SDK identity by running the `aws sts get-caller-identity` command.
+
+ For more information, see the help page with the `aws eks update-kubeconfig help` command or see [update-kubeconfig](https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html) in the *AWS CLI Command Reference*.
3. Test your configuration.
-```shell
-kubectl get svc
-```
+ ```shell
+ kubectl get svc
+ ```
## Install KubeSphere on EKS
-- Install KubeSphere using kubectl. The following command is only for the default minimal installation.
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
-```bash
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
-
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-- Create a local **cluster-configuration.yaml** file.
-```shell
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
-```
-
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
+ ```
- Inspect the logs of installation:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
- When the installation finishes, you can see the following message:
-```bash
-#####################################################
-### Welcome to KubeSphere! ###
-#####################################################
-Account: admin
-Password: P@88w0rd
-NOTES:
- 1. After logging into the console, please check the
- monitoring status of service components in
- the "Cluster Management". If any service is not
- ready, please wait patiently until all components
- are ready.
- 2. Please modify the default password after login.
-#####################################################
-https://kubesphere.io 2020-xx-xx xx:xx:xx
-```
+ ```bash
+ #####################################################
+ ### Welcome to KubeSphere! ###
+ #####################################################
+ Account: admin
+ Password: P@88w0rd
+ NOTES:
+ 1. After logging into the console, please check the
+ monitoring status of service components in
+ the "Cluster Management". If any service is not
+ ready, please wait patiently until all components
+ are ready.
+ 2. Please modify the default password after login.
+ #####################################################
+ https://kubesphere.io 2020-xx-xx xx:xx:xx
+ ```
## Access KubeSphere Console
Now that KubeSphere is installed, you can access the web console of KubeSphere by following the step below.
- Check the service of KubeSphere console through the following command.
-```shell
-kubectl get svc -n kubesphere-system
-```
-- Edit the configuration of the service **ks-console** by executing `kubectl edit ks-console` and change `type` from `NodePort` to `LoadBalancer`. Save the file when you finish.
+ ```shell
+ kubectl get svc -n kubesphere-system
+ ```
+
+- Edit the configuration of the service **ks-console** by executing `kubectl edit ks-console` and change `type` from `NodePort` to `LoadBalancer`. Save the file when you finish.

- Run `kubectl get svc -n kubesphere-system` and get your external IP.
@@ -174,7 +177,7 @@ kubectl get svc -n kubesphere-system
- Log in the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard as shown in the following image.
-
+ 
## Enable Pluggable Components (Optional)
@@ -183,4 +186,3 @@ The example above demonstrates the process of a default minimal installation. To
## Reference
[Getting started with the AWS Management Console](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
-
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md
index b3b1ae9a1..53665ddce 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke.md
@@ -14,99 +14,94 @@ This guide walks you through the steps of deploying KubeSphere on [Google Kubern
- A standard Kubernetes cluster in GKE is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster.
-
+ 
- In **Cluster basics**, select a Master version. The static version `1.15.12-gke.2` is used here as an example.
-
+ 
- In **default-pool** under **Node Pools**, define 3 nodes in this cluster.
-
+ 
- Go to **Nodes**, select the image type and set the Machine Configuration as below. When you finish, click **Create**.
-
+ 
-{{< notice note >}}
+{{< notice note >}}
- Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
-- Ubuntu is used for the operating system here as an example. For more information on supported systems, see Overview.
- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
- The machine type e2-medium (2 vCPU, 4GB memory) is for minimal installation. If you want to enable pluggable components or use the cluster for production, please select a machine type with more resources.
- For other settings, you can change them as well based on your own needs or use the default value.
-{{ notice >}}
+{{ notice >}}
- When the GKE cluster is ready, you can connect to the cluster with Cloud Shell.
-
-
+ 
## Install KubeSphere on GKE
-- Install KubeSphere using kubectl. The following command is only for the default minimal installation.
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
-```bash
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```bash
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
-```
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
+ ```
- Inspect the logs of installation:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
- When the installation finishes, you can see the following message:
-```bash
-#####################################################
-### Welcome to KubeSphere! ###
-#####################################################
-Console: http://10.128.0.44:30880
-Account: admin
-Password: P@88w0rd
-NOTES:
- 1. After logging into the console, please check the
- monitoring status of service components in
- the "Cluster Management". If any service is not
- ready, please wait patiently until all components
- are ready.
- 2. Please modify the default password after login.
-#####################################################
-https://kubesphere.io 2020-xx-xx xx:xx:xx
-```
+ ```bash
+ #####################################################
+ ### Welcome to KubeSphere! ###
+ #####################################################
+ Console: http://10.128.0.44:30880
+ Account: admin
+ Password: P@88w0rd
+ NOTES:
+ 1. After logging into the console, please check the
+ monitoring status of service components in
+ the "Cluster Management". If any service is not
+ ready, please wait patiently until all components
+ are ready.
+ 2. Please modify the default password after login.
+ #####################################################
+ https://kubesphere.io 2020-xx-xx xx:xx:xx
+ ```
## Access KubeSphere Console
-Now that KubeSphere is installed, you can access the web console of KubeSphere by following the step below.
+Now that KubeSphere is installed, you can access the web console of KubeSphere by following the steps below.
- In **Services & Ingress**, select the service **ks-console**.
-
+ 
- In **Service details**, click **Edit** and change the type from `NodePort` to `LoadBalancer`. Save the file when you finish.
-
+ 
- Access the web console of KubeSphere using the endpoint generated by GKE.
+ 
-
+ {{< notice tip >}}
-{{< notice tip >}}
+ Instead of changing the service type to `LoadBalancer`, you can also access KubeSphere console via `NodeIP:NodePort` (service type set to `NodePort`). You may need to open port `30880` in firewall rules.
-Instead of changing the service type to `LoadBalancer`, you can also access KubeSphere console via `NodeIP:NodePort` (service type set to `NodePort`). You may need to open port `30880` in firewall rules.
-
-{{ notice >}}
+ {{ notice >}}
- Log in the console with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard as shown in the following image.
-
+ 
## Enable Pluggable Components (Optional)
diff --git a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md
index 5b7cd6789..ece2222e3 100644
--- a/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md
+++ b/content/en/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke.md
@@ -3,7 +3,7 @@ title: "Deploy KubeSphere on Oracle OKE"
keywords: 'Kubernetes, KubeSphere, OKE, Installation, Oracle-cloud'
description: 'How to install KubeSphere on Oracle OKE'
-weight: 2247
+weight: 2275
---
This guide walks you through the steps of deploying KubeSphere on [Oracle Kubernetes Engine](https://www.oracle.com/cloud/compute/container-engine-kubernetes.html).
@@ -12,97 +12,95 @@ This guide walks you through the steps of deploying KubeSphere on [Oracle Kubern
- A standard Kubernetes cluster in OKE is a prerequisite of installing KubeSphere. Go to the navigation menu and refer to the image below to create a cluster.
-
+ 
- In the pop-up window, select **Quick Create** and click **Launch Workflow**.
-
+ 
-{{< notice note >}}
+ {{< notice note >}}
-In this example, **Quick Create** is used for demonstration which will automatically create all the resources necessary for a cluster in Oracle Cloud. If you select **Custom Create**, you need to create all the resources (such as VCN and LB Subnets) yourself.
+ In this example, **Quick Create** is used for demonstration which will automatically create all the resources necessary for a cluster in Oracle Cloud. If you select **Custom Create**, you need to create all the resources (such as VCN and LB Subnets) by yourself.
-{{ notice >}}
+ {{ notice >}}
- Next, you need to set the cluster with basic information. Here is an example for your reference. When you finish, click **Next**.
-
+ 
-{{< notice note >}}
+ {{< notice note >}}
-- Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
-- It is recommended that you should select **Public** for **Visibility Type**, which will assign a public IP address for every node. The IP address can be used later to access the web console of KubeSphere.
-- In Oracle Cloud, a Shape is a template that determines the number of CPUs, amount of memory, and other resources that are allocated to an instance. `VM.Standard.E2.2 (2 CPUs and 16G Memory)` is used in this example. For more information, see [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard).
-- 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
+ - Supported Kubernetes versions for KubeSphere 3.0.0: 1.15.x, 1.16.x, 1.17.x, 1.18.x.
+ - It is recommended that you should select **Public** for **Visibility Type**, which will assign a public IP address for every node. The IP address can be used later to access the web console of KubeSphere.
+ - In Oracle Cloud, a Shape is a template that determines the number of CPUs, amount of memory, and other resources that are allocated to an instance. `VM.Standard.E2.2 (2 CPUs and 16G Memory)` is used in this example. For more information, see [Standard Shapes](https://docs.cloud.oracle.com/en-us/iaas/Content/Compute/References/computeshapes.htm#vmshapes__vm-standard).
+ - 3 nodes are included in this example. You can add more nodes based on your own needs especially in a production environment.
-{{ notice >}}
+ {{ notice >}}
- Review cluster information and click **Create Cluster** if no adjustment is needed.
-
+ 
- After the cluster is created, click **Close**.
-
+ 
- Make sure the Cluster Status is **Active** and click **Access Cluster**.
-
+ 
- In the pop-up window, select **Cloud Shell Access** to access the cluster. Click **Launch Cloud Shell** and copy the code provided by Oracle Cloud.
-
+ 
- In Cloud Shell, paste the command so that we can execute the installation command later.
-
+ 
-{{< notice warning >}}
+ {{< notice warning >}}
-If you do not copy and execute the command above, you cannot proceed with the steps below.
+ If you do not copy and execute the command above, you cannot proceed with the steps below.
-{{ notice >}}
+ {{ notice >}}
## Install KubeSphere on OKE
-- Install KubeSphere using kubectl. The following command is only for the default minimal installation.
+- Install KubeSphere using kubectl. The following commands are only for the default minimal installation.
-```bash
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```bash
-kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
-```
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
+ ```
- Inspect the logs of installation:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
- When the installation finishes, you can see the following message:
-```bash
-#####################################################
-### Welcome to KubeSphere! ###
-#####################################################
+ ```bash
+ #####################################################
+ ### Welcome to KubeSphere! ###
+ #####################################################
-Console: http://10.0.10.2:30880
-Account: admin
-Password: P@88w0rd
+ Console: http://10.0.10.2:30880
+ Account: admin
+ Password: P@88w0rd
-NOTES:
- 1. After logging into the console, please check the
- monitoring status of service components in
- the "Cluster Management". If any service is not
- ready, please wait patiently until all components
- are ready.
- 2. Please modify the default password after login.
+ NOTES:
+ 1. After logging into the console, please check the
+ monitoring status of service components in
+ the "Cluster Management". If any service is not
+ ready, please wait patiently until all components
+ are ready.
+ 2. Please modify the default password after login.
-#####################################################
-https://kubesphere.io 20xx-xx-xx xx:xx:xx
-```
+ #####################################################
+ https://kubesphere.io 20xx-xx-xx xx:xx:xx
+ ```
## Access KubeSphere Console
@@ -110,43 +108,42 @@ Now that KubeSphere is installed, you can access the web console of KubeSphere e
- Check the service of KubeSphere console through the following command:
-```bash
-kubectl get svc -n kubesphere-system
-```
+ ```bash
+ kubectl get svc -n kubesphere-system
+ ```
- The output may look as below. You can change the type to `LoadBalancer` so that the external IP address can be exposed.
-
+ 
-{{< notice tip >}}
+ {{< notice tip >}}
-It can be seen above that the service `ks-console` is being exposed through NodePort, which means you can access the console directly via `NodeIP:NodePort` (the public IP address of any node is applicable). You may need to open port `30880` in firewall rules.
+ It can be seen above that the service `ks-console` is being exposed through NodePort, which means you can access the console directly via `NodeIP:NodePort` (the public IP address of any node is applicable). You may need to open port `30880` in firewall rules.
-{{ notice >}}
+ {{ notice >}}
- Execute the command to edit the service configuration.
-```bash
-kubectl edit svc ks-console -o yaml -n kubesphere-system
-```
+ ```bash
+ kubectl edit svc ks-console -o yaml -n kubesphere-system
+ ```
- Navigate to `type` and change `NodePort` to `LoadBalancer`. Save the configuration after you finish.
-
+ 
- Execute the following command again and you can see the IP address displayed as below.
-```bash
-kubectl get svc -n kubesphere-system
-```
+ ```bash
+ kubectl get svc -n kubesphere-system
+ ```
-
+ 
- Log in the console through the external IP address with the default account and password (`admin/P@88w0rd`). In the cluster overview page, you can see the dashboard shown below:
-
+ 
## Enable Pluggable Components (Optional)
The example above demonstrates the process of a default minimal installation. To enable other components in KubeSphere, see [Enable Pluggable Components](../../../pluggable-components/) for more details.
-
diff --git a/content/en/docs/installing-on-kubernetes/introduction/overview.md b/content/en/docs/installing-on-kubernetes/introduction/overview.md
index 59c12de96..17a5d82ea 100644
--- a/content/en/docs/installing-on-kubernetes/introduction/overview.md
+++ b/content/en/docs/installing-on-kubernetes/introduction/overview.md
@@ -13,7 +13,7 @@ As part of KubeSphere's commitment to provide a plug-and-play architecture for u
This section gives you an overview of the general steps of installing KubeSphere on Kubernetes. For more information about the specific way of installation in different environments, see Installing on Hosted Kubernetes and Installing on On-premises Kubernetes.
-{{< notice note >}}
+{{< notice note >}}
Please read [Prerequisites](../prerequisites/) before you install KubeSphere on existing Kubernetes clusters.
@@ -27,18 +27,10 @@ After you make sure your existing Kubernetes cluster meets all the requirements,
```bash
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-```
-```bash
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml
```
-{{< notice note >}}
-
-If your server has trouble accessing GitHub, you can copy the content in [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml) and [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) respectively and past it to local files. You then can use `kubectl apply -f` for the local files to install KubeSphere.
-
-{{ notice >}}
-
- Inspect the logs of installation:
```bash
@@ -61,10 +53,8 @@ If you start with a default minimal installation, refer to [Enable Pluggable Com
{{< notice tip >}}
-- Pluggable components can be enabled either before or after the installation. Please refer to the example file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) for more details.
+- Pluggable components can be enabled either before or after the installation. Please refer to the example file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/blob/release-3.0/deploy/cluster-configuration.yaml) for more details.
- Make sure there is enough CPU and memory available in your cluster.
- It is highly recommended that you install these pluggable components to discover the full-stack features and capabilities provided by KubeSphere.
{{ notice >}}
-
-
diff --git a/content/en/docs/installing-on-linux/_index.md b/content/en/docs/installing-on-linux/_index.md
index cb353be3e..c467511ab 100644
--- a/content/en/docs/installing-on-linux/_index.md
+++ b/content/en/docs/installing-on-linux/_index.md
@@ -9,7 +9,7 @@ weight: 2000
icon: "/images/docs/docs.svg"
---
-This chapter demonstrates how to use KubeKey to provision a production-ready Kubernetes and KubeSphere cluster on Linux in different environments. You can also use KubeKey to easily scale up and down your cluster and set various storage classes based on your needs.
+This chapter demonstrates how to use KubeKey to provision a production-ready Kubernetes and KubeSphere cluster on Linux in different environments. You can also use KubeKey to easily scale out and in your cluster and set various storage classes based on your needs.
## Introduction
@@ -53,11 +53,11 @@ Learn how to create a high-availability cluster on QingCloud platform.
### [Add New Nodes](../installing-on-linux/cluster-operation/add-new-nodes/)
-Add more nodes to scale up your cluster.
+Add more nodes to scale out your cluster.
### [Remove Nodes](../installing-on-linux/cluster-operation/remove-nodes/)
-Cordon a node and even delete a node to scale down your cluster.
+Cordon a node and even delete a node to scale in your cluster.
## Uninstalling
@@ -69,7 +69,7 @@ Remove KubeSphere and Kubernetes from your machines.
### [Configure Booster for Installation](../installing-on-linux/faq/configure-booster/)
-Set a registry mirror to speed up downloads during installation.
+Set a registry mirror to speed up image downloads during installation.
## Most Popular Pages
diff --git a/content/en/docs/installing-on-linux/introduction/storage-configuration.md b/content/en/docs/installing-on-linux/introduction/storage-configuration.md
index 762bd9f29..fbf01e324 100644
--- a/content/en/docs/installing-on-linux/introduction/storage-configuration.md
+++ b/content/en/docs/installing-on-linux/introduction/storage-configuration.md
@@ -36,12 +36,35 @@ If you plan to install KubeSphere on [QingCloud](https://www.qingcloud.com/), [Q
### Chart Config
```yaml
config:
- qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" # <--ToBeReplaced-->
- qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" # <--ToBeReplaced ->
- zone: "pek3a" # <--ToBeReplaced-->
+ qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" # Replace it with your own key id.
+ qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" # Replace it with your own access key.
+ zone: "pek3a" # Lowercase letters only.
sc:
- isDefaultClass: true
+ isDefaultClass: true # Set it as the default storage class.
```
+You need to create this file of chart configurations and input the values above manually.
+
+#### Key
+
+To get values for `qy_access_key_id` and `qy_secret_access_key`, log in the web console of [QingCloud](https://console.qingcloud.com/login) and refer to the image below to create a key first. Download the key after it is created, which is stored in a csv file.
+
+
+
+#### Zone
+
+The field `zone` specifies where your cloud volumes are deployed. On QingCloud Platform, you must select a zone before you create volumes.
+
+
+
+Make sure the value you specify for `zone` matches the region ID below:
+
+| Zone | Region ID |
+| ------------------------------------------- | ----------------------- |
+| Shanghai1-A/Shanghai1-B | sh1a/sh1b |
+| Beijing3-A/Beijing3-B/Beijing3-C/Beijing3-D | pek3a/pek3b/pek3c/pek3d |
+| Guangdong2-A/Guangdong2-B | gd2a/gd2b |
+| Asia-Pacific 2-A | ap2a |
+
If you want to configure more values, see [chart configuration for QingCloud CSI](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration).
### Add-on Config
diff --git a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md
index ac519f3cf..7e885dd11 100644
--- a/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md
+++ b/content/en/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere.md
@@ -464,7 +464,7 @@ spec:
enabled: false
notification: # It supports notification management in multi-tenant Kubernetes clusters. It allows you to set AlertManager as its sender, and receivers include Email, Wechat Work, and Slack.
enabled: false
- openpitrix: # Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications, and offer application lifecycle management
+ openpitrix: # Whether to install KubeSphere App Store. It provides an application store for Helm-based applications, and offer application lifecycle management
enabled: false
servicemesh: # Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offer visualization for traffic topology
enabled: false
diff --git a/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md b/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md
index 8e6bf3809..213b13219 100644
--- a/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md
+++ b/content/en/docs/multicluster-management/enable-multicluster/agent-connection.md
@@ -16,7 +16,7 @@ Multi-cluster management requires Kubesphere to be installed on the target clust
## Agent Connection
-The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the H Cluster cannot access the M Cluster directly, you can expose the proxy service address of the H cluster. This enables the M Cluster to connect to the H cluster through the agent. This method is applicable when the M Cluster is in a private environment (e.g. IDC) and the H Cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed across different cloud providers.
+The component [Tower](https://github.com/kubesphere/tower) of KubeSphere is used for agent connection. Tower is a tool for network connection between clusters through the agent. If the Host Cluster (hereafter referred to as H Cluster) cannot access the Member Cluster (hereafter referred to as M Cluster) directly, you can expose the proxy service address of the H cluster. This enables the M Cluster to connect to the H cluster through the agent. This method is applicable when the M Cluster is in a private environment (e.g. IDC) and the H Cluster is able to expose the proxy service. The agent connection is also applicable when your clusters are distributed across different cloud providers.
### Prepare a Host Cluster
@@ -28,13 +28,13 @@ If you already have a standalone KubeSphere installed, you can set the value of
- Option A - Use Web Console:
-Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
-```shell
-kubectl edit cc ks-installer -n kubesphere-system
-```
+ ```shell
+ kubectl edit cc ks-installer -n kubesphere-system
+ ```
Scroll down and set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective:
@@ -47,7 +47,7 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
-There is no big difference if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
+There is no big difference from installing a standalone KubeSphere if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
multicluster:
@@ -93,40 +93,40 @@ Note: Generally, there is always a LoadBalancer solution in the public cloud, an
1. If you cannot see a corresponding address displayed (the EXTERNAL-IP is pending), you need to manually set the proxy address. For example, you have an available public IP address `139.198.120.120`, and the port `8080` of this IP address has been forwarded to the port `30721` of the cluster. Execute the following command to check the service.
-```shell
-kubectl -n kubesphere-system get svc
-```
+ ```shell
+ kubectl -n kubesphere-system get svc
+ ```
-```shell
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-tower LoadBalancer 10.233.63.191 8080:30721/TCP 16h
-```
+ ```shell
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ tower LoadBalancer 10.233.63.191 8080:30721/TCP 16h
+ ```
2. Add the value of `proxyPublishAddress` to the configuration file of ks-installer and input the public IP address and port number as follows.
-- Option A - Use Web Console:
+ - Option A - Use Web Console:
-Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
-- Option B - Use Kubectl:
+ - Option B - Use Kubectl:
-```bash
-kubectl -n kubesphere-system edit clusterconfiguration ks-installer
-```
+ ```bash
+ kubectl -n kubesphere-system edit clusterconfiguration ks-installer
+ ```
-Navigate to `multicluster` and add a new line for `proxyPublishAddress` to define the IP address so access tower.
+ Navigate to `multicluster` and add a new line for `proxyPublishAddress` to define the IP address to access tower.
-```yaml
-multicluster:
- clusterRole: host
- proxyPublishAddress: http://139.198.120.120:8080 # Add this line to set the address to access tower
-```
+ ```yaml
+ multicluster:
+ clusterRole: host
+ proxyPublishAddress: http://139.198.120.120:8080 # Add this line to set the address to access tower
+ ```
-3. Save the configuration and restart `ks-apiserver`.
+3. Save the configuration and wait for a while, or you can manually restart `ks-apiserver` to make the change effective immediately using the following command.
-```shell
-kubectl -n kubesphere-system rollout restart deployment ks-apiserver
-```
+ ```shell
+ kubectl -n kubesphere-system rollout restart deployment ks-apiserver
+ ```
{{ tab >}}
@@ -154,13 +154,13 @@ If you already have a standalone KubeSphere installed, you can set the value of
- Option A - Use Web Console:
-Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
-```shell
-kubectl edit cc ks-installer -n kubesphere-system
-```
+ ```shell
+ kubectl edit cc ks-installer -n kubesphere-system
+ ```
Input the corresponding `jwtSecret` shown above:
@@ -180,7 +180,7 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
-There is no big difference if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
+There is no big difference from installing a standalone KubeSphere if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
authentication:
@@ -198,23 +198,18 @@ multicluster:
{{ tabs >}}
-
### Import Cluster
1. Open the H Cluster dashboard and click **Add Cluster**.
-
-
+ 
2. Enter the basic information of the cluster to be imported and click **Next**.
+ 
-
+3. In **Connection Method**, select **Cluster connection agent** and click **Import**. It will show the agent deployment generated by the H Cluster in the console.
+ 
-3. In **Connection Method**, select **Cluster connection agent** and click **Import**.
-
-
-
-4. Create an `agent.yaml` file in the M Cluster based on the instruction, then copy and paste the deployment to the file. Execute `kubectl create -f agent.yaml` on the node and wait for the agent to be up and running. Please make sure the proxy address is accessible to the M Cluster.
+4. Create an `agent.yaml` file in the M Cluster based on the instruction, then copy and paste the agent deployment to the file. Execute `kubectl create -f agent.yaml` on the node and wait for the agent to be up and running. Please make sure the proxy address is accessible to the M Cluster.
5. You can see the cluster you have imported in the H Cluster when the cluster agent is up and running.
-
-
+ 
diff --git a/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md b/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md
index 953375584..59c2e1774 100644
--- a/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md
+++ b/content/en/docs/multicluster-management/enable-multicluster/direct-connection.md
@@ -28,13 +28,13 @@ If you already have a standalone KubeSphere installed, you can set the value of
- Option A - Use Web Console:
-Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
-```shell
-kubectl edit cc ks-installer -n kubesphere-system
-```
+ ```shell
+ kubectl edit cc ks-installer -n kubesphere-system
+ ```
Scroll down and set the value of `clusterRole` to `host`, then click **Update** (if you use the web console) to make it effective:
@@ -47,7 +47,7 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
-There is no big difference if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
+There is no big difference from installing a standalone KubeSphere if you define a host cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
multicluster:
@@ -86,13 +86,13 @@ If you already have a standalone KubeSphere installed, you can set the value of
- Option A - Use Web Console:
-Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
+ Use `admin` account to log in the console and go to **CRDs** on the **Cluster Management** page. Enter the keyword `ClusterConfiguration` and go to its detail page. Edit the YAML of `ks-installer`, which is similar to [Enable Pluggable Components](../../../pluggable-components/).
- Option B - Use Kubectl:
-```shell
-kubectl edit cc ks-installer -n kubesphere-system
-```
+ ```shell
+ kubectl edit cc ks-installer -n kubesphere-system
+ ```
Input the corresponding `jwtSecret` shown above:
@@ -112,7 +112,7 @@ multicluster:
{{< tab "KubeSphere has not been installed" >}}
-There is no big difference if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
+There is no big difference from installing a standalone KubeSphere if you define a member cluster before installation. Please note that the `clusterRole` in `config-sample.yaml` or `cluster-configuration.yaml` has to be set as follows:
```yaml
authentication:
@@ -139,23 +139,18 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
### Import Cluster
1. Open the H Cluster dashboard and click **Add Cluster**.
-
-
+ 
2. Enter the basic information of the cluster to be imported and click **Next**.
-
-
+ 
3. In **Connection Method**, select **Direct Connection to Kubernetes cluster**.
4. [Retrieve the KubeConfig](../retrieve-kubeconfig), copy the KubeConfig of the Member Cluster and paste it into the box.
-
-{{< notice tip >}}
-Please make sure the `server` address in KubeConfig is accessible on any node of the H Cluster. For `KubeSphere API Server` address, you can fill in the KubeSphere APIServer address or leave it blank.
-{{ notice >}}
-
-
+ {{< notice tip >}}
+ Please make sure the `server` address in KubeConfig is accessible on any node of the H Cluster.
+ {{</ notice >}}
+ 
5. Click **Import** and wait for cluster initialization to finish.
-
-
+ 
diff --git a/content/en/docs/pluggable-components/alerting-notification.md b/content/en/docs/pluggable-components/alerting-notification.md
index 8db1091fe..9eb2b704f 100644
--- a/content/en/docs/pluggable-components/alerting-notification.md
+++ b/content/en/docs/pluggable-components/alerting-notification.md
@@ -11,7 +11,7 @@ weight: 3545
Alerting and Notification are two important building blocks of observability, closely related monitoring and logging. The alerting system in KubeSphere, coupled with the proactive failure notification system, allows users to know activities of interest based on alert policies. When a predefined threshold of a certain metric is reached, an alert will be sent to preconfigured recipients, the notification method of which can be set by yourself, including Email, WeChat Work and Slack. With a highly functional alerting and notification system in place, you can quickly identify and resolve potential issues in advance before they affect your business.
-For more information, see Alerting Policy and Message.
+For more information, see [Alerting Policy](../../project-user-guide/alerting/alerting-policy) and [Alerting Message](../../project-user-guide/alerting/alerting-message).
{{< notice note >}}
@@ -23,99 +23,92 @@ It is recommended that you enable Alerting and Notification together so that use
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Alerting and Notification in this mode (e.g. for testing purpose), refer to the following section to see how Alerting and Notification can be installed after installation.
-
-{{ notice >}}
+ {{</ notice >}}
2. In this file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-alerting:
- enabled: true # Change "false" to "true"
-notification:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ alerting:
+ enabled: true # Change "false" to "true"
+ notification:
+ enabled: true # Change "false" to "true"
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Alerting and Notification, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except the optional components Alerting and Notification need to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Alerting and Notification, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `alerting` and `notification` and enable them by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `alerting` and `notification` and enable them by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-alerting:
- enabled: true # Change "false" to "true"
-notification:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ alerting:
+ enabled: true # Change "false" to "true"
+ notification:
+ enabled: true # Change "false" to "true"
+ ```
-4. Execute the following command to start installation:
+3. Execute the following commands to start installation:
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
+
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Alerting and Notification after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-alerting:
- enabled: true # Change "false" to "true"
-notification:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ alerting:
+ enabled: true # Change "false" to "true"
+ notification:
+ enabled: true # Change "false" to "true"
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{</ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/app-store.md b/content/en/docs/pluggable-components/app-store.md
index 0c5102ed5..bf3985060 100644
--- a/content/en/docs/pluggable-components/app-store.md
+++ b/content/en/docs/pluggable-components/app-store.md
@@ -15,99 +15,92 @@ Internally, KubeSphere App Store can serve as a place for different teams to sha

-For more information, see App Store.
+For more information, see [App Store](../../application-store/).
## Enable App Store before Installation
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable App Store in this mode (e.g. for testing purpose), refer to the following section to see how App Store can be installed after installation.
-
-{{ notice >}}
+ {{</ notice >}}
2. In this file, navigate to `openpitrix` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-openpitrix:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ openpitrix:
+ enabled: true # Change "false" to "true"
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install App Store, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except the optional component App Store needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable App Store, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `openpitrix` and enable App Store by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `openpitrix` and enable App Store by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-openpitrix:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ openpitrix:
+ enabled: true # Change "false" to "true"
+ ```
-4. Execute the following command to start installation:
+3. Execute the following commands to start installation:
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
+
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable App Store after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{</ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-openpitrix:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ openpitrix:
+ enabled: true # Change "false" to "true"
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{</ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/auditing-logs.md b/content/en/docs/pluggable-components/auditing-logs.md
index 9d3fef9cb..87a8e0357 100644
--- a/content/en/docs/pluggable-components/auditing-logs.md
+++ b/content/en/docs/pluggable-components/auditing-logs.md
@@ -7,157 +7,144 @@ linkTitle: "KubeSphere Auditing Logs"
weight: 3525
---
-## What are KubeSphere Auditing Logs?
+## What are KubeSphere Auditing Logs
KubeSphere Auditing Log System provides a security-relevant chronological set of records documenting the sequence of activities related to individual users, managers, or other components of the system. Each request to KubeSphere generates an event that is then written to a webhook and processed according to a certain rule.
-For more information, see Logging, Events, and Auditing.
+For more information, see [Auditing Log Query](../../toolbox/auditing/auditing-query).
## Enable Auditing Logs before Installation
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Auditing in this mode (e.g. for testing purpose), refer to the following section to see how Auditing can be installed after installation.
-
-{{ notice >}}
+ {{</ notice >}}
2. In this file, navigate to `auditing` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-auditing:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ auditing:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in **config-sample.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
-
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Auditing, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except the optional component Auditing needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Auditing, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `auditing` and enable Auditing by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `auditing` and enable Auditing by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-auditing:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ auditing:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, ks-installer will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in **cluster-configuration.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, ks-installer will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
-{{ notice >}}
+3. Execute the following commands to start installation:
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-4. Execute the following command to start installation:
-
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Auditing Logs after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{</ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-auditing:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ auditing:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
-
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{</ notice >}}
## Verify the Installation of Component
@@ -183,7 +170,7 @@ kubectl get pod -n kubesphere-logging-system
The output may look as follows if the component runs successfully:
-```bash
+```yaml
NAME READY STATUS RESTARTS AGE
elasticsearch-logging-curator-elasticsearch-curator-159872n9g9g 0/1 Completed 0 2d10h
elasticsearch-logging-curator-elasticsearch-curator-159880tzb7x 0/1 Completed 0 34h
diff --git a/content/en/docs/pluggable-components/devops.md b/content/en/docs/pluggable-components/devops.md
index 753e4c14c..4970f49d1 100644
--- a/content/en/docs/pluggable-components/devops.md
+++ b/content/en/docs/pluggable-components/devops.md
@@ -13,99 +13,92 @@ KubeSphere DevOps System is designed for CI/CD workflows in Kubernetes. Based on
The DevOps system offers an enabling environment for users as apps can be automatically released to the same platform. It is also compatible with third-party private image registries (e.g. Harbor) and code repositories (e.g. GitLab/GitHub/SVN/BitBucket). As such, it creates excellent user experiences by providing users with comprehensive, visualized CI/CD pipelines which are extremely useful in air-gapped environments.
-For more information, see DevOps Administration.
+For more information, see [DevOps User Guide](../../devops-user-guide/).
## Enable DevOps before Installation
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable DevOps in this mode (e.g. for testing purpose), refer to the following section to see how DevOps can be installed after installation.
-
-{{ notice >}}
+ {{</ notice >}}
2. In this file, navigate to `devops` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-devops:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ devops:
+ enabled: true # Change "false" to "true"
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install DevOps, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except that the optional component DevOps needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable DevOps, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `devops` and enable DevOps by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `devops` and enable DevOps by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-devops:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ devops:
+ enabled: true # Change "false" to "true"
+ ```
-4. Execute the following command to start installation:
+3. Execute the following commands to start installation:
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
+
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable DevOps after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{</ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-devops:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ devops:
+ enabled: true # Change "false" to "true"
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{</ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/events.md b/content/en/docs/pluggable-components/events.md
index d3b5626d7..0767c5af4 100644
--- a/content/en/docs/pluggable-components/events.md
+++ b/content/en/docs/pluggable-components/events.md
@@ -11,153 +11,140 @@ weight: 3530
KubeSphere events allow users to keep track of what is happening inside a cluster, such as node scheduling status and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. To query events, users can quickly launch the web Toolkit and enter related information in the search bar with different filters (e.g keyword and project) available. Events can also be archived to third-party tools, such as Elasticsearch, Kafka or Fluentd.
-For more information, see Logging, Events and Auditing.
+For more information, see [Events Query](../../toolbox/events-query).
## Enable Events before Installation
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Events in this mode (e.g. for testing purpose), refer to the following section to see how Events can be installed after installation.
-
-{{ notice >}}
+ {{</ notice >}}
2. In this file, navigate to `events` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-events:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ events:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, KubeKey will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in **config-sample.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, KubeKey will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
-
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Events, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except that the optional component Events needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Events, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `events` and enable Events by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `events` and enable Events by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-events:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ events:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in **cluster-configuration.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
-{{ notice >}}
+3. Execute the following commands to start installation:
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-4. Execute the following command to start installation:
-
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Events after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{</ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-events:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ events:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, Elasticsearch will be installed internally if Events is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{</ notice >}}
-By default, Elasticsearch will be installed internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
-
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{</ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/faq/logging.md b/content/en/docs/pluggable-components/faq/logging.md
index 26b707f06..7131f0bc7 100644
--- a/content/en/docs/pluggable-components/faq/logging.md
+++ b/content/en/docs/pluggable-components/faq/logging.md
@@ -7,13 +7,22 @@ linkTitle: "Logging"
weight: 3535
---
-## How to change the log store to external elasticsearch and shut down the internal elasticsearch?
+- [How to change the log store to external elasticsearch and shut down the internal elasticsearch](#how-to-change-the-log-store-to-external-elasticsearch-and-shut-down-the-internal-elasticsearch)
+- [How to change the log store to elasticsearch with X-Pack Security enabled](#how-to-change-the-log-store-to-elasticsearch-with-x-pack-security-enabled)
+- [How to modify log data retention days](#how-to-modify-log-data-retention-days)
+- [Cannot find out logs from workloads on some nodes in Toolbox](#cannot-find-out-logs-from-workloads-on-some-nodes-in-toolbox)
+- [The log view page in Toolbox gets stuck in loading](#the-log-view-page-in-toolbox-gets-stuck-in-loading)
+- [Toolbox shows no log record today](#toolbox-shows-no-log-record-today)
+- [Internal Server Error when viewing logs in Toolbox](#internal-server-error-when-viewing-logs-in-toolbox)
+- [How to make KubeSphere only collect logs from specified workloads](#how-to-make-kubesphere-only-collect-logs-from-specified-workloads)
+
+## How to change the log store to external elasticsearch and shut down the internal elasticsearch
If you are using KubeSphere internal elasticsearch and want to change it to your external alternate, follow the guide below. Otherwise, if you haven't enabled logging system yet, go to [Enable Logging](../../logging/) to setup external elasticsearch directly.
First, update KubeKey config.
-```shell
+```bash
kubectl edit cc -n kubesphere-system ks-installer
```
@@ -21,51 +30,51 @@ kubectl edit cc -n kubesphere-system ks-installer
- Set `es.externalElasticsearchUrl` to the address of your elasticsearch and `es.externalElasticsearchPort` to its port number.
-```shell
-apiVersion: installer.kubesphere.io/v1alpha1
-kind: ClusterConfiguration
-metadata:
- name: ks-installer
- namespace: kubesphere-system
- ...
-spec:
- ...
- common:
- es:
- # elasticsearchDataReplicas: 1
- # elasticsearchDataVolumeSize: 20Gi
- # elasticsearchMasterReplicas: 1
- # elasticsearchMasterVolumeSize: 4Gi
- elkPrefix: logstash
- logMaxAge: 7
- externalElasticsearchUrl: <192.168.0.2>
- externalElasticsearchPort: <9200>
- ...
-status:
- ...
- # logging:
- # enabledTime: 2020-08-10T02:05:13UTC
- # status: enabled
- ...
-```
+ ```yaml
+ apiVersion: installer.kubesphere.io/v1alpha1
+ kind: ClusterConfiguration
+ metadata:
+ name: ks-installer
+ namespace: kubesphere-system
+ ...
+ spec:
+ ...
+ common:
+ es:
+ # elasticsearchDataReplicas: 1
+ # elasticsearchDataVolumeSize: 20Gi
+ # elasticsearchMasterReplicas: 1
+ # elasticsearchMasterVolumeSize: 4Gi
+ elkPrefix: logstash
+ logMaxAge: 7
+ externalElasticsearchUrl: <192.168.0.2>
+ externalElasticsearchPort: <9200>
+ ...
+ status:
+ ...
+ # logging:
+ # enabledTime: 2020-08-10T02:05:13UTC
+ # status: enabled
+ ...
+ ```
Second, rerun ks-installer.
-```shell
+```bash
kubectl rollout restart deploy -n kubesphere-system ks-installer
```
Finally, to remove the internal elasticsearch, run the following command. Please make sure you have backed up data in the internal elasticsearch.
-```shell
+```bash
helm uninstall -n kubesphere-logging-system elasticsearch-logging
```
-## How to change the log store to elasticsearch with X-Pack Security enabled?
+## How to change the log store to elasticsearch with X-Pack Security enabled
Currently, KubeSphere doesn't support integration with elasticsearch having X-Pack Security enabled. This feature is coming soon.
-## How to modify log data retention days?
+## How to modify log data retention days
You need update KubeKey config and rerun ks-installer.
@@ -77,53 +86,53 @@ kubectl edit cc -n kubesphere-system ks-installer
- Set `es.logMaxAge` to the desired days (7 by default)
-```shell
-apiVersion: installer.kubesphere.io/v1alpha1
-kind: ClusterConfiguration
-metadata:
- name: ks-installer
- namespace: kubesphere-system
- ...
-spec:
- ...
- common:
- es:
- ...
- logMaxAge: <7>
- ...
-status:
- ...
- # logging:
- # enabledTime: 2020-08-10T02:05:13UTC
- # status: enabled
- ...
-```
+ ```yaml
+ apiVersion: installer.kubesphere.io/v1alpha1
+ kind: ClusterConfiguration
+ metadata:
+ name: ks-installer
+ namespace: kubesphere-system
+ ...
+ spec:
+ ...
+ common:
+ es:
+ ...
+ logMaxAge: <7>
+ ...
+ status:
+ ...
+ # logging:
+ # enabledTime: 2020-08-10T02:05:13UTC
+ # status: enabled
+ ...
+ ```
-- Rerun ks-installer
+Rerun ks-installer:
-```shell
+```bash
kubectl rollout restart deploy -n kubesphere-system ks-installer
```
-## Cannot find out logs from workloads on some nodes in Toolbox.
+## Cannot find out logs from workloads on some nodes in Toolbox
-If you adopt [Multi-node installation](../../installing-on-linux/introduction/multioverview/) and are using symbolic links for docker root directory, make sure all nodes follow the exactly same symbolic links. Logging agents are deployed in DaemonSet onto nodes. Any discrepancy in container log paths may cause failure of collection on that node.
+If you adopt [Multi-node installation](../../../installing-on-linux/introduction/multioverview/) and are using symbolic links for docker root directory, make sure all nodes follow the exactly same symbolic links. Logging agents are deployed in DaemonSet onto nodes. Any discrepancy in container log paths may cause failure of collection on that node.
To find out the docker root directory path on nodes, you can run the following command. Make sure the same value applies to all nodes.
-```
+```bash
docker info -f '{{.DockerRootDir}}'
```
-## The log view page in Toolbox gets stuck in loading.
+## The log view page in Toolbox gets stuck in loading
If you observe log searching gets stuck in loading, please check the storage system you are using. For example, a malconfigured NFS storage system may cause this issue.
## Toolbox shows no log record today
-Please check if your log volume exceeds the storage capacity limit of elasticsearch. If so, increase elasticsearch disk volume.
+Please check if your log volume exceeds the storage capacity limit of elasticsearch. If so, increase elasticsearch disk volume.
-## Internal Server Error when viewing logs in Toolbox
+## Internal Server Error when viewing logs in Toolbox
If you observe Internal Server Error in the Toolbox, there may be several reasons leading to this issue:
@@ -131,14 +140,14 @@ If you observe Internal Server Error in the Toolbox, there may be several reason
- Invalid elasticsearch host and port
- Elasticsearch health status is red
-## How to make KubeSphere only collect logs from specified workloads?
+## How to make KubeSphere only collect logs from specified workloads
KubeSphere logging agent is powered by Fluent Bit. You need update Fluent Bit config to exclude certain workload logs. To modify Fluent Bit input config, run the following command:
-```shell
+```bash
kubectl edit input -n kubesphere-logging-system tail
```
Update the field `Input.Spec.Tail.ExcludePath`. For example, set the path to `/var/log/containers/*_kube*-system_*.log` to exclude any log from system components.
-Read the project [Fluent Bit Operator](https://github.com/kubesphere/fluentbit-operator) for more information.
\ No newline at end of file
+Read the project [Fluent Bit Operator](https://github.com/kubesphere/fluentbit-operator) for more information.
diff --git a/content/en/docs/pluggable-components/faq/monitoring.md b/content/en/docs/pluggable-components/faq/monitoring.md
index 16d59f66f..f19d6980c 100644
--- a/content/en/docs/pluggable-components/faq/monitoring.md
+++ b/content/en/docs/pluggable-components/faq/monitoring.md
@@ -7,25 +7,34 @@ linkTitle: "Monitoring"
weight: 3540
---
-## How to access KubeSphere Prometheus console?
+- [How to access KubeSphere Prometheus console](#how-to-access-kubesphere-prometheus-console)
+- [Host port 9100 conflict caused by node exporter](#host-port-9100-conflict-caused-by-node-exporter)
+- [Conflicts with preexisting prometheus operator](#conflicts-with-preexisting-prometheus-operator)
+- [How to modify monitoring data retention days](#how-to-modify-monitoring-data-retention-days)
+- [No monitoring data for kube-scheduler and kube-controller-manager](#no-monitoring-data-for-kube-scheduler-and-kube-controller-manager)
+- [No monitoring data for the last few minutes](#no-monitoring-data-for-the-last-few-minutes)
+- [No monitoring data for both nodes and the control plane](#no-monitoring-data-for-both-nodes-and-the-control-plane)
+- [Prometheus produces error log: opening storage failed, no such file or directory](#prometheus-produces-error-log-opening-storage-failed-no-such-file-or-directory)
+
+## How to access KubeSphere Prometheus console
KubeSphere monitoring engine is powered by Prometheus. For debugging purpose, you may want to access the built-in Prometheus service via NodePort. To do so, run the following command to edit the service type:
-```shell
+```bash
kubectl edit svc -n kubesphere-monitoring-system prometheus-k8s
-```
+```
## Host port 9100 conflict caused by node exporter
If you have processes occupying host port 9100, node exporter under `kubesphere-monitoring-system` will be crashing. To resolve the conflict, you need to either terminate the process or change node exporter to another available port.
To adopt another host port, for example `29100`, run the following command and replace all `9100` to `29100` (5 places require change).
-
- ```shell
+
+ ```bash
kubectl edit ds -n kubesphere-monitoring-system node-exporter
```
-
- ```shell
+
+ ```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
@@ -60,21 +69,21 @@ If you have processes occupying host port 9100, node exporter under `kubesphere-
If you have deployed Prometheus Operator on your own, make sure the Prometheus Operator gets removed before you installing KubeSphere. Otherwise, there may be conflicts that KubeSphere built-in prometheus operator selects duplicate ServiceMonitor objects.
-## How to modify monitoring data retention days?
+## How to modify monitoring data retention days
Run the following command to edit the max retention days. Find out the field `retention` and update it to the desired days (7 by default).
-```shell
+```bash
kubectl edit prometheuses -n kubesphere-monitoring-system k8s
```
-## No monitoring data for kube-scheduler / kube-controller-manager
+## No monitoring data for kube-scheduler and kube-controller-manager
First, please make sure the flag `--bind-address` is set to `0.0.0.0` (default) rather than `127.0.0.1`. Prometheus may need reachability to theses components from other hosts.
Second, please check the presence of endpoint objects for kube-scheduler and kube-controller-manager. If they are missing, please create them manually by creating services selecting target pods.
-```shell
+```bash
kubectl get ep -n kube-system | grep -E 'kube-scheduler|kube-controller-manager'
```
@@ -92,13 +101,13 @@ Chinese readers may refer to [the discussion](https://kubesphere.com.cn/forum/d/
If the Prometheus pod under kubesphere-monitoring-system is crashing and produces the following error log, your Prometheus data may be corrupt and need manual deletion to recover.
-```
+```shell
level=error ts=2020-10-14T17:43:30.485Z caller=main.go:764 err="opening storage failed: block dir: \"/prometheus/01EM0016F8FB33J63RNHFMHK3\": open /prometheus/01EM0016F8FB33J63RNHFMHK3/meta.json: no such file or directory"
-```
+```
Exec into the Prometheus pod (if possible), and remove the block dir `/prometheus/01EM0016F8FB33J63RNHFMHK3`:
-```shell
+```bash
kubectl exec -it -n kubesphere-monitoring-system prometheus-k8s-0 -c prometheus sh
rm -rf 01EM0016F8FB33J63RNHFMHK3/
diff --git a/content/en/docs/pluggable-components/logging.md b/content/en/docs/pluggable-components/logging.md
index d36c9edad..2a9ea74a8 100644
--- a/content/en/docs/pluggable-components/logging.md
+++ b/content/en/docs/pluggable-components/logging.md
@@ -11,7 +11,7 @@ weight: 3535
KubeSphere provides a powerful, holistic and easy-to-use logging system for log collection, query and management. It covers logs at varied levels, including tenants, infrastructure resources, and applications. Users can search logs from different dimensions, such as project, workload, Pod and keyword. Compared with Kibana, the tenant-based logging system of KubeSphere features better isolation and security among tenants as each tenant can only view his or her own logs. Apart from KubeSphere's own logging system, the container platform also allows users to add third-party log collectors, such as Elasticsearch, Kafka and Fluentd.
-For more information, see Logging, Events and Auditing.
+For more information, see [Log Query](../../toolbox/log-query).
## Enable Logging before Installation
@@ -21,148 +21,134 @@ When you install KubeSphere on Linux, you need to create a configuration file, w
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Logging in this mode (e.g. for testing purpose), refer to the following section to see how Logging can be installed after installation.
+ {{ notice >}}
-{{ notice >}}
-
-{{< notice warning >}}
-
-If you adopt [Multi-node installation](../../installing-on-linux/introduction/multioverview/) and are using symbolic links for docker root directory, make sure all nodes follow the exactly same symbolic links. Logging agents are deployed in DaemonSet onto nodes. Any discrepancy in container log path may cause failure of collection on that node.
-
-{{ notice >}}
+ {{< notice warning >}}
+If you adopt [Multi-node installation](../../installing-on-linux/introduction/multioverview/) and are using symbolic links for docker root directory, make sure all nodes follow the exactly same symbolic links. Logging agents are deployed in DaemonSet onto nodes. Any discrepancy in container log path may cause failure of collection on that node.
+ {{ notice >}}
2. In this file, navigate to `logging` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-logging:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ logging:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in **config-sample.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{ notice >}}
-By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Logging, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except that the optional component Logging needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Logging, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `logging` and enable Logging by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `logging` and enable Logging by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-logging:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ logging:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in **cluster-configuration.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{ notice >}}
-By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
-{{ notice >}}
+3. Execute the following commands to start installation:
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
-4. Execute the following command to start installation:
-
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Logging after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-logging:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ logging:
+ enabled: true # Change "false" to "true"
+ ```
-{{< notice note >}}
+ {{< notice note >}}
+By default, Elasticsearch will be installed internally if Logging is enabled. For a production environment, it is highly recommended that you set the following values in this yaml file if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+ {{ notice >}}
-By default, Elasticsearch will be installed internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
-
-{{ notice >}}
-
-```bash
-es: # Storage backend for logging, tracing, events and auditing.
- elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
- elasticsearchDataReplicas: 1 # total number of data nodes
- elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
- elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
- logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
- elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
- externalElasticsearchUrl: # The URL of external Elasticsearch
- externalElasticsearchPort: # The port of external Elasticsearch
-```
+ ```yaml
+ es: # Storage backend for logging, tracing, events and auditing.
+ elasticsearchMasterReplicas: 1 # total number of master nodes, it's not allowed to use even number
+ elasticsearchDataReplicas: 1 # total number of data nodes
+ elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes
+ elasticsearchDataVolumeSize: 20Gi # Volume size of Elasticsearch data nodes
+ logMaxAge: 7 # Log retention time in built-in Elasticsearch, it is 7 days by default.
+ elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log
+ externalElasticsearchUrl: # The URL of external Elasticsearch
+ externalElasticsearchPort: # The port of external Elasticsearch
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/network-policy.md b/content/en/docs/pluggable-components/network-policy.md
index d5023d3c6..38178fe06 100644
--- a/content/en/docs/pluggable-components/network-policy.md
+++ b/content/en/docs/pluggable-components/network-policy.md
@@ -24,93 +24,86 @@ For more information, see [Network Policies](https://kubernetes.io/docs/concepts
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Network Policy in this mode (e.g. for testing purpose), refer to the following section to see how Network Policy can be installed after installation.
-
-{{ notice >}}
+ {{ notice >}}
2. In this file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-networkpolicy:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ networkpolicy:
+ enabled: true # Change "false" to "true"
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Network Policy, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except that the optional component Network Policy needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Network Policy, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `networkpolicy` and enable Network Policy by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `networkpolicy` and enable Network Policy by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-networkpolicy:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ networkpolicy:
+ enabled: true # Change "false" to "true"
+ ```
-4. Execute the following command to start installation:
+3. Execute the following commands to start installation:
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
+
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Network Policy after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-networkpolicy:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ networkpolicy:
+ enabled: true # Change "false" to "true"
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/pluggable-components/service-mesh.md b/content/en/docs/pluggable-components/service-mesh.md
index 1abff4711..2096fdf6e 100644
--- a/content/en/docs/pluggable-components/service-mesh.md
+++ b/content/en/docs/pluggable-components/service-mesh.md
@@ -11,99 +11,92 @@ weight: 3540
On the basis of [Istio](https://istio.io/), KubeSphere Service Mesh visualizes microservices governance and traffic management. It features a powerful toolkit including **circuit breaking, blue-green deployment, canary release, traffic mirroring, distributed tracing, observability and traffic control**. Developers can easily get started with Service Mesh without any code hacking, with the learning curve of Istio greatly reduced. All features of KubeSphere Service Mesh are designed to meet users' demand for their business.
-For more information, see related sections in Project Administration and Usage.
+For more information, see [Grayscale Release](../../project-user-guide/grayscale-release/overview).
## Enable Service Mesh before Installation
### Installing on Linux
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
-```bash
-vi config-sample.yaml
-```
-
-{{< notice note >}}
+ ```bash
+ vi config-sample.yaml
+ ```
+ {{< notice note >}}
If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Service Mesh in this mode (e.g. for testing purpose), refer to the following section to see how Service Mesh can be installed after installation.
-
-{{ notice >}}
+ {{ notice >}}
2. In this file, navigate to `servicemesh` and change `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-servicemesh:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ servicemesh:
+ enabled: true # Change "false" to "true"
+ ```
3. Create a cluster using the configuration file:
-```bash
-./kk create cluster -f config-sample.yaml
-```
+ ```bash
+ ./kk create cluster -f config-sample.yaml
+ ```
### **Installing on Kubernetes**
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Service Mesh, do not use `kubectl apply -f` directly for this file.
+The process of installing KubeSphere on Kubernetes is the same as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/) except that the optional component Service Mesh needs to be enabled first in the [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml).
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Service Mesh, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and open it for editing.
-```bash
-vi cluster-configuration.yaml
-```
+ ```bash
+ vi cluster-configuration.yaml
+ ```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `servicemesh` and enable Service Mesh by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. In this local cluster-configuration.yaml file, navigate to `servicemesh` and enable Service Mesh by changing `false` to `true` for `enabled`. Save the file after you finish.
-```bash
-servicemesh:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ servicemesh:
+ enabled: true # Change "false" to "true"
+ ```
-4. Execute the following command to start installation:
+3. Execute the following commands to start installation:
-```bash
-kubectl apply -f cluster-configuration.yaml
-```
+ ```bash
+ kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml
+
+ kubectl apply -f cluster-configuration.yaml
+ ```
## Enable Service Mesh after Installation
1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
-
-
+ 
2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
-{{< notice info >}}
-
+ {{< notice info >}}
A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
-
-{{ notice >}}
+ {{ notice >}}
3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
-
-
+ 
4. In this yaml file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
-```bash
-servicemesh:
- enabled: true # Change "false" to "true"
-```
+ ```yaml
+ servicemesh:
+ enabled: true # Change "false" to "true"
+ ```
5. You can use the web kubectl to check the installation process by executing the following command:
-```bash
-kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
-```
-
-{{< notice tip >}}
+ ```bash
+ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
+ ```
+ {{< notice tip >}}
You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
-
-{{ notice >}}
+ {{ notice >}}
## Verify the Installation of Component
diff --git a/content/en/docs/project-user-guide/application-workloads/app-template.md b/content/en/docs/project-user-guide/application-workloads/app-template.md
deleted file mode 100644
index 6beb2a23c..000000000
--- a/content/en/docs/project-user-guide/application-workloads/app-template.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: "Application Template"
-keywords: 'kubernetes, chart, helm, KubeSphere, application'
-description: 'Application Template'
-
-linkTitle: "Application Template"
-weight: 2210
----
-
-TBD
diff --git a/content/en/docs/project-user-guide/application/_index.md b/content/en/docs/project-user-guide/application/_index.md
new file mode 100644
index 000000000..4fcb92e7b
--- /dev/null
+++ b/content/en/docs/project-user-guide/application/_index.md
@@ -0,0 +1,9 @@
+---
+linkTitle: "Application"
+weight: 2079
+
+_build:
+ render: false
+---
+
+TBD
diff --git a/content/en/docs/project-user-guide/application-workloads/composing-app.md b/content/en/docs/project-user-guide/application/composing-app.md
similarity index 100%
rename from content/en/docs/project-user-guide/application-workloads/composing-app.md
rename to content/en/docs/project-user-guide/application/composing-app.md
diff --git a/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md b/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md
new file mode 100644
index 000000000..c70fbb1b6
--- /dev/null
+++ b/content/en/docs/project-user-guide/application/deploy-app-from-appstore.md
@@ -0,0 +1,53 @@
+---
+title: "Deploy Applications from App Store"
+keywords: 'kubernetes, chart, helm, KubeSphere, application'
+description: 'Deploy Applications from App Store'
+
+
+weight: 2209
+---
+
+The application template is the storage, delivery, and management approach for the application in KubeSphere. The application template is built on the [Helm](https://helm.sh/) packaging specification and delivered through a unified public or private application repository. The application can be composed of one or more Kubernetes workloads and services according to the application's characteristics.
+
+Application templates visualize and provide deployment and management capabilities in KubeSphere, enabling users to quickly deploy applications to pointed projects based on application templates. The application template can serve as a middleware and business system created by the enterprise, which could be shared between the teams. It can also be used as the basis for constructing industry delivery standards, delivery processes and paths according to industry characteristics.
+
+Before using an application template, you need to add an application repository in advance. KubeSphere built an application repository service based on [OpenPitrix](https://openpitrix.io). Before using the application template, you need to upload the Helm application package to the object storage, then add an application repository in KubeSphere. It will automatically load all the applications as app templates under this repository, as described in [Add Application Repository](../deploy-app-from-repo).
+
+
+
+In addition, application templates can also be combined with OpenPitrix's application lifecycle management capabilities to support docking ISV, and regular users through application uploading, application review, deployment testing, application publishing, application version management and more, finally building a public or private application store that offers application services for KubeSphere. Companies can also build industry-wide public or internal application stores to enable standardized one-click delivery of applications, see [OpenPitrix Official Documentation](https://openpitrix.io/docs/v0.4/zh-CN/manual-guide/introduction).
+
+## Application List
+
+In all projects, an **application** portal is provided, which serves as an entry point for the application template. Once the application is deployed, it can also be used as a list of applications to manage all applications under the current project.
+
+
+
+Click **Deploy New Application** to go to the **App Templates** page.
+
+## Application Template
+
+### Add a sample repository
+
+As mentioned earlier, before using an application template, the cluster admin needs to pre-add the available application repository so that users can access and deploy the application in the application template.
+
+This document provides a sample application repository just for demonstration. Users can upload application packages in the object storage and add application repositories as needed.
+
+1. Sign in with the cluster admin account to the KubeSphere and go into the target workspace, then choose **App Management → App Repos** to enter the list page.
+
+
+
+2. Click **Add Repo** button.
+
+3. Fill in the basic information in the pop-up window, select https for the URL, fill in the blank with `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/`, then click the **Validate** button. After the validation is passed, click **OK** to complete it.
+
+
+
+
+### Access the application templates
+
+Log out and switch to sign in with project-regular account, the normal user of the project and go into the target project, then choose **Application Workloads → Applications → Deploy New Application → From App Templates → docs-demo-repo**, you can see that all the applications in the sample application repository have been imported into the application template, then you will be able to browse or search for the desired app for one-click deployment to the desired project.
+
+
+
+
\ No newline at end of file
diff --git a/content/en/docs/project-user-guide/application/deploy-app-from-repo.md b/content/en/docs/project-user-guide/application/deploy-app-from-repo.md
new file mode 100644
index 000000000..b82381ca5
--- /dev/null
+++ b/content/en/docs/project-user-guide/application/deploy-app-from-repo.md
@@ -0,0 +1,91 @@
+---
+title: "Deploy Applications from App Repository"
+keywords: 'kubernetes, chart, helm, KubeSphere, application'
+description: 'Deploy Applications from App Repository'
+
+
+weight: 2211
+---
+
+## Objective
+
+This tutorial shows you how to quickly deploy a [Grafana](https://grafana.com/) application using templates from the KubeSphere application store sponsored by [OpenPitrix](https://github.com/openpitrix/openpitrix). The demonstration includes importing an application repository, sharing and deploying apps within a workspace.
+
+## Prerequisites
+
+- You have enabled [KubeSphere App Store](../../pluggable-components/app-store)
+- You have completed the tutorial in [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/)
+
+## Hands-on Lab
+
+### Step 1: Add an Application Repository
+
+> Note: The application repository can be hosted by either object storage, e.g. [QingStor Object Storage](https://www.qingcloud.com/products/qingstor/), [AWS S3](https://aws.amazon.com/what-is-cloud-object-storage/), or by [GitHub Repository](https://github.com/). The packages are composed of Helm Chart template files of the applications. Therefore, before adding an application repository to KubeSphere, you need to create an object storage bucket and upload Helm packages in advance. This tutorial prepares a demo repository based on QingStor Object Storage.
+
+1.1. Sign in with `ws-admin` account, click **View Workspace** and navigate to **Workspace Settings → App Repos**, then click **Create App Repository**.
+
+
+
+1.2. Fill in the basic information, name it `demo-repo` and input the URL `https://helm-chart-repo.pek3a.qingstor.com/kubernetes-charts/`. You can validate if this URL is available, and choose **OK** when you have done.
+
+> Note: It will automatically import all of the applications from the Helm repository into KubeSphere. You can browse those app templates in each project.
+
+
+
+### Step 2: Browse App Templates
+
+2.1. Switch to use `project-regular` account to log in, then enter into `demo-project`.
+
+2.2. Click **Application Workloads → Applications**, click **Deploy New Application**.
+
+
+
+2.3. Choose **From App Templates** and select `demo-repo` from the dropdown list.
+
+
+
+2.4. Search `Grafana` and click into Grafana App. We will demonstrate deploying Grafana to Kubernetes as an example.
+
+> Note: The applications of this demo repository are synchronized from the Google Helm repo. Some applications may not be able to be deployed successfully, since the helm charts were maintained by different organizations.
+
+### Step 3: Deploy Grafana Application
+
+3.1. Click **Deploy** on the right. Generally you do not need to change any configuration, just click **Deploy**.
+
+
+
+3.2. Wait for two minutes, then you will see the application `grafana` showing `active` on the application list.
+
+
+
+### Step 4: Expose Grafana Service
+
+4.1. Click into Grafana application, and then enter into its service page.
+
+
+
+4.2. In this page, make sure its deployment and Pod are running, then click **More → Edit Internet Access**, and select **NodePort** in the dropdown list, click **OK** to save it.
+
+
+
+4.3. At this point, you will be able to access Grafana service from outside of the cluster.
+
+
+
+### Step 5: Access the Grafana Service
+
+In this step, we can access Grafana service using `${Node IP}:${NODEPORT}`, e.g. `http://192.168.0.54:31407`, or click the button **Click to visit** to access the Grafana dashboard.
+
+5.1. Note you have to obtain the account and password from the grafana secret in advance. Navigate to **Configuration Center → Secrets**, click into **grafana-l47bmc** with Type Default.
+
+
+
+5.2. Click the eye button to display the secret information, then copy and paste the values of **admin-user** and **admin-password** respectively.
+
+
+
+5.3. Open the Grafana login page, sign in with the **admin** account.
+
+
+
+
\ No newline at end of file
diff --git a/content/en/docs/project-user-guide/application/deploy-app-from-template.md b/content/en/docs/project-user-guide/application/deploy-app-from-template.md
new file mode 100644
index 000000000..f44d974fd
--- /dev/null
+++ b/content/en/docs/project-user-guide/application/deploy-app-from-template.md
@@ -0,0 +1,62 @@
+---
+title: "Deploy Applications from App Template"
+keywords: 'kubernetes, chart, helm, KubeSphere, application'
+description: 'Deploy Applications from App Template'
+
+
+weight: 2210
+---
+
+## Objective
+
+This tutorial shows you a simple example of how to quickly deploy an [Nginx](https://nginx.org/) application using templates from the KubeSphere application store sponsored by [OpenPitrix](https://github.com/openpitrix/openpitrix). The demonstration includes one-click deployment of apps within a workspace and exposing the service via NodePort.
+
+## Prerequisites
+
+- You have enabled [KubeSphere App Store](../../pluggable-components/app-store)
+- You have completed the tutorial in [Create Workspace, Project, Account and Role](../../quick-start/create-workspace-and-project/)
+
+## Hands-on Lab
+
+### Step 1: Browse App Templates
+
+1.1. Switch to use `project-regular` account to log in, then enter into `demo-project`.
+
+1.2. Click **Application Workloads → Applications**, click **Deploy New Application**.
+
+
+
+1.3. Choose **From App Store** and enter into app store.
+
+
+
+
+
+1.4. Search `Nginx` and click into Nginx App. We will demonstrate how to one-click deploying Nginx to Kubernetes.
+
+### Step 2: One-click Deploy Nginx Application
+
+2.1. Click **Deploy** on the right. Generally you do not need to change any configuration, just click **Deploy**.
+
+
+
+2.2. Wait for two minutes, then you will see the application `nginx` showing `active` on the application list.
+
+
+
+### Step 3: Expose Nginx Web Service
+
+3.1. Click into Nginx application, and then enter into its service page.
+
+
+
+3.2. In this page, make sure its deployment and Pod are running, then click **More → Edit Internet Access**, and select **NodePort** in the dropdown list, click **OK** to save it.
+
+
+
+3.3. At this point, you will be able to access Nginx web service from outside of the cluster.
+
+
+
+
+
diff --git a/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md b/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md
index 2bc812253..3301595a2 100644
--- a/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md
+++ b/content/en/docs/project-user-guide/grayscale-release/blue-green-deployment.md
@@ -1,10 +1,100 @@
---
title: "Blue-green Deployment"
-keywords: 'KubeSphere, kubernetes, docker, helm, jenkins, istio, prometheus'
-description: 'Blue-green Deployment'
+keywords: 'KubeSphere, Kubernetes, service mesh, istio, release, blue-green deployment'
+description: 'How to implement blue-green deployment for an app.'
linkTitle: "Blue-green Deployment"
weight: 2130
---
-TBD
\ No newline at end of file
+
+The blue-green release provides a zero downtime deployment, which means the new version can be deployed with the old one preserved. At any time, only one of the versions is active serving all the traffic, while the other one remains idle. If there is a problem with running, you can quickly roll back to the old version.
+
+
+
+
+## Prerequisites
+
+- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/).
+- You need to create a workspace, a project and an account (`project-regular`). Please refer to [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project) if they are not ready yet.
+- You need to sign in with the `project-admin` account and invite `project-regular` to the corresponding project. Please refer to [these steps to invite a member](../../../quick-start/create-workspace-and-project#task-3-create-a-project).
+- You need to enable **Application Governance** and have an available app so that you can implement the blue-green deployment for it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/).
+
+## Create Blue-green Deployment Job
+
+1. Log in to KubeSphere as `project-regular`. Under **Categories**, click **Create Job** on the right of **Blue-green Deployment**.
+
+
+
+2. Set a name for it and click **Next**.
+
+
+
+3. Select your app from the drop-down list and the service for which you want to implement the blue-green deployment. If you also use the sample app Bookinfo, select **reviews** and click **Next**.
+
+
+
+4. On the **Grayscale Release Version** page, add another version of it (e.g. `v2`) as shown in the image below and click **Next**:
+
+
+
+{{< notice note >}}
+
+The image version is `v2` in the screenshot.
+
+{{</ notice >}}
+
+5. To allow the app version `v2` to take over all the traffic, select **Take over all traffic** and click **Create**.
+
+
+
+
+6. The blue-green deployment job created displays under the tab **Job Status**. Click it to view details.
+
+
+
+7. Wait for a while and you can see all the traffic go to the version `v2`:
+
+
+
+8. The new **Deployment** is created as well.
+
+
+
+9. Besides, you can directly get the virtual service to identify the weight by executing the following command:
+
+```bash
+kubectl -n demo-project get virtualservice -o yaml
+```
+
+{{< notice note >}}
+
+- When you execute the command above, replace `demo-project` with your own project (i.e. namespace) name.
+- If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the account `admin`.
+
+{{</ notice >}}
+
+10. Expected output:
+
+```yaml
+...
+ spec:
+ hosts:
+ - reviews
+ http:
+ - route:
+ - destination:
+ host: reviews
+ port:
+ number: 9080
+ subset: v2
+ weight: 100
+ ...
+```
+
+## Take a Job Offline
+
+After you implement the blue-green deployment, and the result meets your expectation, you can take the task offline with the version `v1` removed by clicking **Job offline**.
+
+
+
diff --git a/content/en/docs/project-user-guide/grayscale-release/canary-release.md b/content/en/docs/project-user-guide/grayscale-release/canary-release.md
index d9f1aa954..399b91762 100644
--- a/content/en/docs/project-user-guide/grayscale-release/canary-release.md
+++ b/content/en/docs/project-user-guide/grayscale-release/canary-release.md
@@ -1,10 +1,108 @@
---
title: "Canary Release"
-keywords: 'KubeSphere, kubernetes, docker, helm, jenkins, istio, prometheus'
-description: 'Canary Release'
+keywords: 'KubeSphere, Kubernetes, canary release, istio, service mesh'
+description: 'How to implement the canary release for an app.'
linkTitle: "Canary Release"
weight: 2130
---
-TBD
\ No newline at end of file
+On the back of [Istio](https://istio.io/), KubeSphere provides users with necessary control to deploy canary services. In a canary release, you introduce a new version of a service and test it by sending a small percentage of traffic to it. At the same time, the old version is responsible for handling the rest of the traffic. If everything goes well, you can gradually increase the traffic sent to the new version, while simultaneously phasing out the old version. In the case of any occurring issues, KubeSphere allows you to roll back to the previous version as you change the traffic percentage.
+
+This method serves as an efficient way to test performance and reliability of a service. It can help detect potential problems in the actual environment while not affecting the overall system stability.
+
+
+
+## Prerequisites
+
+- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/).
+- You need to create a workspace, a project and an account (`project-regular`). Please refer to [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project) if they are not ready yet.
+- You need to sign in with the `project-admin` account and invite `project-regular` to the corresponding project. Please refer to [these steps to invite a member](../../../quick-start/create-workspace-and-project#task-3-create-a-project).
+- You need to enable **Application Governance** and have an available app so that you can implement the canary release for it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/).
+
+## Create Canary Release Job
+
+1. Log in to KubeSphere as `project-regular`. Under **Categories**, click **Create Job** on the right of **Canary Release**.
+
+
+
+2. Set a name for it and click **Next**.
+
+
+
+3. Select your app from the drop-down list and the service for which you want to implement the canary release. If you also use the sample app Bookinfo, select **reviews** and click **Next**.
+
+
+
+4. On the **Grayscale Release Version** page, add another version of it (e.g. `v2`) as shown in the image below and click **Next**:
+
+
+
+{{< notice note >}}
+
+The image version is `v2` in the screenshot.
+
+{{</ notice >}}
+
+5. You send traffic to these two versions (`v1` and `v2`) either by a specific percentage or by the request content such as `Http Header`, `Cookie` and `URI`. Select **Forward by traffic ratio** and drag the icon in the middle to change the percentage of traffic sent to these two versions respectively (e.g. set 50% for either one). When you finish, click **Create**.
+
+
+
+6. The canary release job created displays under the tab **Job Status**. Click it to view details.
+
+
+
+7. Wait for a while and you can see half of the traffic go to each of them:
+
+
+
+8. The new **Deployment** is created as well.
+
+
+
+9. Besides, you can directly get the virtual service to identify the weight by executing the following command:
+
+```bash
+kubectl -n demo-project get virtualservice -o yaml
+```
+
+{{< notice note >}}
+
+- When you execute the command above, replace `demo-project` with your own project (i.e. namespace) name.
+- If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the account `admin`.
+
+{{</ notice >}}
+
+10. Expected output:
+
+```yaml
+...
+spec:
+ hosts:
+ - reviews
+ http:
+ - route:
+ - destination:
+ host: reviews
+ port:
+ number: 9080
+ subset: v1
+ weight: 50
+ - destination:
+ host: reviews
+ port:
+ number: 9080
+ subset: v2
+ weight: 50
+ ...
+```
+## Take a Job Offline
+
+1. After you implement the canary release, and the result meets your expectation, you can select **Take Over** from the menu, sending all the traffic to the new version.
+
+
+
+2. To remove the old version with the new version handling all the traffic, click **Job offline**.
+
+
+
diff --git a/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md b/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md
index 083beeb31..ac8347aed 100644
--- a/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md
+++ b/content/en/docs/project-user-guide/grayscale-release/traffic-mirroring.md
@@ -1,10 +1,107 @@
---
title: "Traffic Mirroring"
-keywords: 'KubeSphere, kubernetes, docker, helm, jenkins, istio, prometheus'
+keywords: 'KubeSphere, Kubernetes, traffic mirroring, istio'
description: 'Traffic Mirroring'
linkTitle: "Traffic Mirroring"
weight: 2130
---
-TBD
\ No newline at end of file
+Traffic mirroring, also called shadowing, is a powerful, risk-free method of testing your app versions as it sends a copy of live traffic to a service that is being mirrored. Namely, you implement a similar setup for acceptance test so that problems can be detected in advance. As mirrored traffic happens out of band of the critical request path for the primary service, your end users will not be affected during the whole process.
+
+## Prerequisites
+
+- You need to enable [KubeSphere Service Mesh](../../../pluggable-components/service-mesh/).
+- You need to create a workspace, a project and an account (`project-regular`). Please refer to [Create Workspace, Project, Account and Role](../../../quick-start/create-workspace-and-project) if they are not ready yet.
+- You need to sign in with the `project-admin` account and invite `project-regular` to the corresponding project. Please refer to [these steps to invite a member](../../../quick-start/create-workspace-and-project#task-3-create-a-project).
+- You need to enable **Application Governance** and have an available app so that you can mirror the traffic of it. The sample app used in this tutorial is Bookinfo. For more information, see [Deploy Bookinfo and Manage Traffic](../../../quick-start/deploy-bookinfo-to-k8s/).
+
+## Create Traffic Mirroring Job
+
+1. Log in to KubeSphere as `project-regular`. Under **Categories**, click **Create Job** on the right of **Traffic Mirroring**.
+
+
+
+2. Set a name for it and click **Next**.
+
+
+
+3. Select your app from the drop-down list and the service of which you want to mirror the traffic. If you also use the sample app Bookinfo, select **reviews** and click **Next**.
+
+
+
+4. On the **Grayscale Release Version** page, add another version of it (e.g. `v2`) as shown in the image below and click **Next**:
+
+
+
+{{< notice note >}}
+
+The image version is `v2` in the screenshot.
+
+{{</ notice >}}
+
+5. Click **Create** in the final step.
+
+
+
+6. The traffic mirroring job created displays under the tab **Job Status**. Click it to view details.
+
+
+
+7. You can see the traffic is being mirrored to `v2` with real-time traffic displaying in the line chart.
+
+
+
+8. The new **Deployment** is created as well.
+
+
+
+9. Besides, you can directly get the virtual service to view `mirror` and `weight` by executing the following command:
+
+```bash
+kubectl -n demo-project get virtualservice -o yaml
+```
+
+{{< notice note >}}
+
+- When you execute the command above, replace `demo-project` with your own project (i.e. namespace) name.
+- If you want to execute the command from the web kubectl on the KubeSphere console, you need to use the account `admin`.
+
+{{</ notice >}}
+
+10. Expected output:
+
+```
+...
+spec:
+ hosts:
+ - reviews
+ http:
+ - route:
+ - destination:
+ host: reviews
+ port:
+ number: 9080
+ subset: v1
+ weight: 100
+ mirror:
+ host: reviews
+ port:
+ number: 9080
+ subset: v2
+ ...
+```
+
+This route rule sends 100% of the traffic to `v1`. The last stanza specifies that you want to mirror to the service `reviews v2`. When traffic gets mirrored, the requests are sent to the mirrored service with their Host/Authority headers appended with `-shadow`. For example, `cluster-1` becomes `cluster-1-shadow`.
+
+{{< notice note >}}
+
+These requests are mirrored as “fire and forget”, which means that the responses are discarded. You can specify the `weight` field to mirror a fraction of the traffic, instead of mirroring all requests. If this field is absent, for compatibility with older versions, all traffic will be mirrored. For more information, see [Mirroring](https://istio.io/v1.5/pt-br/docs/tasks/traffic-management/mirroring/).
+
+{{</ notice >}}
+
+## Take a Job Offline
+
+You can remove the traffic mirroring job by clicking **Job offline**, which does not affect the current app version.
+
+
\ No newline at end of file
diff --git a/content/en/docs/project-user-guide/storage/volume-snapshots.md b/content/en/docs/project-user-guide/storage/volume-snapshots.md
index 55768746d..148bc1538 100644
--- a/content/en/docs/project-user-guide/storage/volume-snapshots.md
+++ b/content/en/docs/project-user-guide/storage/volume-snapshots.md
@@ -7,4 +7,25 @@ linkTitle: "Volume Snapshots"
weight: 2130
---
-TBD
\ No newline at end of file
+## Introduction
+Many storage systems provide the ability to create a "snapshot" of a persistent volume.
+A snapshot represents a point-in-time copy of a volume.
+A snapshot can be used either to provision a new volume (pre-populated with the snapshot data)
+or to restore the existing volume to a previous state (represented by the snapshot).
+
+On KubeSphere, requirements for Volume Snapshot are:
+- Kubernetes 1.17 or later
+- The underlying storage plugin supports snapshots
+
+## Create Volume Snapshot
+Volume Snapshot could be created from an existing volume on the volume detail page.
+
+
+The created Volume Snapshot will be listed in the volume snapshot page.
+
+
+## Apply Volume Snapshot
+Volume Snapshot could be applied to create volume from the snapshot.
+
+
+
diff --git a/content/en/docs/project-user-guide/storage/volumes.md b/content/en/docs/project-user-guide/storage/volumes.md
index b9b129818..9ee31a234 100644
--- a/content/en/docs/project-user-guide/storage/volumes.md
+++ b/content/en/docs/project-user-guide/storage/volumes.md
@@ -1,10 +1,46 @@
---
title: "Volumes"
-keywords: 'kubernetes, docker, helm, jenkins, istio, prometheus'
+keywords: 'kubernetes, docker, persistent volume, persistent volume claim, volume clone, volume snapshot, volume expanding'
description: 'Create Volumes (PVCs)'
linkTitle: "Volumes"
weight: 2110
---
-TBD
+## Introduction
+In this section, volumes always refer to the PersistentVolumeClaim (PVC) of Kubernetes.
+
+## Create Volume
+### Method
+There are two methods to create volume:
+- Create empty volume by StorageClass
+- Create volume from VolumeSnapshot
+
+
+
+## Attach Volume onto Workloads
+Take attaching a volume to a deployment as an example: in the `Mount Volume` step of *Create Deployment*,
+volumes could be attached to containers' paths.
+
+
+## Volume Features
+Volume Features include:
+- Clone Volume
+- Create Volume Snapshot
+- Expand Volume
+
+KubeSphere can detect the features supported by the underlying storage plugin, known as `Storage Capability`.
+The console display only supported features in `Volume Detail Page`.
+For more information about `Storage Capability`, see [Design Documentation](https://github.com/kubesphere/community/blob/master/sig-storage/concepts-and-designs/storage-capability-interface.md)
+
+
+
+**Note**: Some in-tree or special CSI plugins may not be covered by **Storage Capability**.
+If KubeSphere does not display the right features in your cluster, you can adjust them according to this [method](https://github.com/kubesphere/kubesphere/issues/2986).
+
+## Volume Monitoring
+KubeSphere gets metric data of PVC with FileSystem mode from Kubelet to monitor volumes including capacity usage and inode usage.
+
+
+For more information about Volume Monitoring illustrations, see [Research on Volume Monitoring](https://github.com/kubesphere/kubesphere/issues/2921).
+
diff --git a/content/en/docs/quick-start/enable-pluggable-components.md b/content/en/docs/quick-start/enable-pluggable-components.md
index e8bf3e982..ad686e7a7 100644
--- a/content/en/docs/quick-start/enable-pluggable-components.md
+++ b/content/en/docs/quick-start/enable-pluggable-components.md
@@ -22,7 +22,7 @@ This tutorial demonstrates how to enable pluggable components of KubeSphere both
| openpitrix | KubeSphere App Store | Provide an app store for Helm-based applications and allow users to manage apps throughout the entire lifecycle. |
| servicemesh | KubeSphere Service Mesh (Istio-based) | Provide fine-grained traffic management, observability and tracing, and visualized traffic topology. |
-For more information about each component, see Overview of Enable Pluggable Components.
+For more information about each component, see [Overview of Enable Pluggable Components](../../pluggable-components/).
{{< notice note >}}
@@ -36,7 +36,7 @@ For more information about each component, see Overview of Enable Pluggable Comp
### **Installing on Linux**
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+When you implement multi-node installation of KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
@@ -46,11 +46,11 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (e.g. for testing purpose), refer to the following section to see how pluggable components can be installed after installation.
+If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (e.g. for testing purpose), refer to the [following section](#enable-pluggable-components-after-installation) to see how pluggable components can be installed after installation.
{{</ notice >}}
-2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [an example file](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md) for your reference. Save the file after you finish.
+2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [an example file](https://github.com/kubesphere/kubekey/blob/release-1.0/docs/config-example.md) for your reference. Save the file after you finish.
3. Create a cluster using the configuration file:
```bash
@@ -59,22 +59,27 @@ If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/),
### Installing on Kubernetes
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install pluggable components, do not use `kubectl apply -f` directly for this file.
+When you install KubeSphere on Kubernetes, you need to execute `kubectl apply -f` first for the installer file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml) as stated in the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/). After that, follow the steps below to enable pluggable components:
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable pluggable components, create a local file cluster-configuration.yaml.
+1. Download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and copy and paste the content of it to a local `cluster-configuration.yaml` file.
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [an example file](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) for your reference. Save the file after you finish.
-4. Execute the following command to start installation:
+2. To enable the pluggable component you want to install, change `false` to `true` for `enabled` under the component in this file.
+3. Save this local file and execute the following command to apply it.
```bash
kubectl apply -f cluster-configuration.yaml
```
+{{< notice warning >}}
+
+You must apply the `kubesphere-installer.yaml` file first before you apply the file `cluster-configuration.yaml`. Wrong execution order or the failure to apply either file can result in installation failure.
+
+{{</ notice >}}
+
Whether you install KubeSphere on Linux or on Kubernetes, you can check the status of the components you have enabled in the web console of KubeSphere after installation. Go to **Components**, and you can see an image below:

diff --git a/content/en/docs/quick-start/wordpress-deployment.md b/content/en/docs/quick-start/wordpress-deployment.md
index b1586be59..cae4b835d 100644
--- a/content/en/docs/quick-start/wordpress-deployment.md
+++ b/content/en/docs/quick-start/wordpress-deployment.md
@@ -1,15 +1,15 @@
---
-title: "Compose and Deploy Wordpress"
-keywords: 'KubeSphere, Kubernetes, app, Wordpress'
+title: "Compose and Deploy WordPress"
+keywords: 'KubeSphere, Kubernetes, app, WordPress'
description: 'Compose and deploy Wordpress.'
-linkTitle: "Compose and Deploy Wordpress"
+linkTitle: "Compose and Deploy WordPress"
weight: 3050
---
## WordPress Introduction
-WordPress is a free and open-source content management system written in PHP, allowing users to build their own websites. A complete Wordpress application includes the following Kubernetes objects with MySQL serving as the backend database.
+WordPress is a free and open-source content management system written in PHP, allowing users to build their own websites. A complete WordPress application includes the following Kubernetes objects with MySQL serving as the backend database.

diff --git a/content/en/docs/upgrade/upgrade-overview.md b/content/en/docs/upgrade/upgrade-overview.md
index 79681f9f0..966b7719c 100644
--- a/content/en/docs/upgrade/upgrade-overview.md
+++ b/content/en/docs/upgrade/upgrade-overview.md
@@ -16,6 +16,7 @@ KubeSphere v3.0.0 is compatible with Kubernetes 1.15.x, 1.16.x, 1.17.x and 1.18.
- If your KubeSphere v2.1.x is installed on Kubernetes 1.14.x, you have to upgrade Kubernetes (to 1.15.x+) and KubeSphere (to v3.0.0 ) at the same time.
{{< notice warning >}}
+
There are some significant API changes in Kubernetes 1.16.x compared with prior versions 1.14.x and 1.15.x. Please refer to [Deprecated APIs Removed In 1.16: Here’s What You Need To Know](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for more details. So if you plan to upgrade from Kubernetes 1.14.x/1.15.x to 1.16.x+, you have to migrate some of your workloads after upgrading.
{{</ notice >}}
@@ -23,6 +24,7 @@ There are some significant API changes in Kubernetes 1.16.x compared with prior
## Before Upgrade
{{< notice warning >}}
+
- You are supposed to implement a simulation for the upgrade in a testing environment first. After the upgrade is successful in the testing environment and all applications are running normally, upgrade it in your production environment.
- During the upgrade process, there may be a short interruption of applications (especially for those single-replica Pod). Please arrange a reasonable period of time for upgrade.
- It is recommended to back up ETCD and stateful applications before upgrading in a production environment. You can use [Velero](https://velero.io/) to implement backup and migrate Kubernetes resources and persistent volumes.
@@ -31,8 +33,8 @@ There are some significant API changes in Kubernetes 1.16.x compared with prior
## How
-A brand-new installer [KubeKey](https://github.com/kubesphere/kubekey) is introduced in KubeSphere v3.0.0, with which you can install or upgrade Kubernetes and KubeSphere. More details about upgrading with [KubeKey](https://github.com/kubesphere/kubekey) will be covered in [Upgrade with KubeKey](../upgrade-with-kubekey/).
+A brand-new installer [KubeKey](https://github.com/kubesphere/kubekey) is introduced in KubeSphere v3.0.0, with which you can install or upgrade Kubernetes and KubeSphere. More details about upgrading with KubeKey will be covered in [Upgrade with KubeKey](../upgrade-with-kubekey/).
-## KubeKey or ks-installer?
+## KubeKey or ks-installer
[ks-installer](https://github.com/kubesphere/ks-installer/tree/master) was the main installation tool as of KubeSphere v2. For users whose Kubernetes clusters were NOT deployed via [KubeSphere Installer](https://v2-1.docs.kubesphere.io/docs/installation/all-in-one/#step-2-download-installer-package), they should choose ks-installer to upgrade KubeSphere. For example, if your Kubernetes is hosted by cloud vendors or self provisioned, please refer to [Upgrade with ks-installer](../upgrade-with-ks-installer).
diff --git a/content/en/docs/upgrade/upgrade-with-ks-installer.md b/content/en/docs/upgrade/upgrade-with-ks-installer.md
index a8f1b642d..bd005610e 100644
--- a/content/en/docs/upgrade/upgrade-with-ks-installer.md
+++ b/content/en/docs/upgrade/upgrade-with-ks-installer.md
@@ -13,15 +13,15 @@ ks-installer is recommended for users whose Kubernetes clusters were not set up
- You need to have a KubeSphere cluster running version 2.1.1.
-{{< notice warning >}}
+ {{< notice warning >}}
If your KubeSphere version is v2.1.0 or earlier, please upgrade to v2.1.1 first.
-{{ notice >}}
+ {{ notice >}}
- Make sure you read [Release Notes For 3.0.0](../../release/release-v300/) carefully.
-{{< notice warning >}}
+ {{< notice warning >}}
In v3.0.0, KubeSphere refactors many of its components such as Fluent Bit Operator and IAM. Make sure you back up any important components in case you heavily customized them but not from console.
-{{ notice >}}
+ {{ notice >}}
## Step 1: Download YAML files
@@ -47,4 +47,4 @@ kubectl apply -f kubesphere-installer.yaml
```bash
kubectl apply -f cluster-configuration.yaml
-```
\ No newline at end of file
+```
diff --git a/content/en/docs/upgrade/upgrade-with-kubekey.md b/content/en/docs/upgrade/upgrade-with-kubekey.md
index 0f599c5c5..1f981786a 100644
--- a/content/en/docs/upgrade/upgrade-with-kubekey.md
+++ b/content/en/docs/upgrade/upgrade-with-kubekey.md
@@ -12,11 +12,9 @@ KubeKey is recommended for users whose KubeSphere and Kubernetes were both deplo
- You need to have a KubeSphere cluster running version 2.1.1.
-{{< notice warning >}}
-
+ {{< notice warning >}}
If your KubeSphere version is v2.1.0 or earlier, please upgrade to v2.1.1 first.
-
-{{ notice >}}
+ {{ notice >}}
- Download KubeKey.
@@ -31,7 +29,6 @@ wget https://github.com/kubesphere/kubekey/releases/download/v1.0.0/kubekey-v1.0
```
{{ tab >}}
-
{{< tab "For users with poor network connections to GitHub" >}}
Download KubeKey using the following command:
@@ -39,8 +36,8 @@ Download KubeKey using the following command:
```bash
wget -c https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz -O - | tar -xz
```
-{{ tab >}}
+{{ tab >}}
{{ tabs >}}
Make `kk` executable:
@@ -51,15 +48,12 @@ chmod +x kk
- Make sure you read [Release Notes For 3.0.0](../../release/release-v300/) carefully.
-{{< notice warning >}}
-
+ {{< notice warning >}}
In v3.0.0, KubeSphere refactors many of its components such as Fluent Bit Operator and IAM. Make sure you back up any important components in case you heavily customized them but not from console.
-
-{{ notice >}}
+ {{ notice >}}
- Make your upgrade plan. Two upgrading scenarios are documented below.
-
## Upgrade KubeSphere and Kubernetes
Upgrading steps are different for single-node clusters (all-in-one) and multi-node clusters.
diff --git a/content/en/docs/workspaces-administration/import-helm-repository.md b/content/en/docs/workspaces-administration/import-helm-repository.md
new file mode 100644
index 000000000..adac6c936
--- /dev/null
+++ b/content/en/docs/workspaces-administration/import-helm-repository.md
@@ -0,0 +1,39 @@
+---
+title: "Import Helm Repository"
+keywords: "kubernetes, helm, kubesphere, application"
+description: "Import Helm Repository into KubeSphere"
+
+linkTitle: "Import Helm Repository"
+weight: 100
+---
+
+KubeSphere builds application repository services on [OpenPitrix](https://openpitrix.io), the open source cross-cloud application management platform from [QingCloud](https://www.qingcloud.com), which supports Kubernetes applications based on Helm Chart. In an application repository, each application is a base package repository and if you want to use OpenPitrix for application management, you need to create the repository first. You can store packages on an HTTP/HTTPS server, a [minio](https://docs.min.io/), or an S3 object storage. The application repository is an external storage independent of OpenPitrix, which can be [minio](https://docs.min.io/), QingCloud's QingStor object storage, or AWS object storage, in which the contents are the configuration packages of applications developed by developers, along with index files. After registering the repository, the stored application configuration packages are automatically indexed as deployable applications.
+
+## Preparing the application repository
+
+The [official Helm documentation](https://helm.sh/docs/topics/chart_repository/#hosting-chart-repositories) already provides several ways to create application repositories, but in this document, we recommend that you use the official KubeSphere helm repo.
+
+- [KubeSphere Official Application Repository](https://charts.kubesphere.io/)
+
+## Adding application repositories
+
+1. Create a Workspace, and then in the Workspace, go to `App Managements → App repos` and click `Add Repo`.
+
+ 
+
+2. In the Add Repository window, fill in the URL with `https://charts.kubesphere.io/main`, and then create the repository after verification.
+
+ - Repository Name: Give a simple and clear name to the repository, which is easy for users to browse and search.
+ - Type: Helm Chart type application is supported.
+ - URL: The following three protocols are supported
+      - The URL is S3 styled, e.g. `s3.<zone>.qingstor.com/<bucket>` to access the QingStor service using the S3 interface.
+ - HTTP: readable, not writable, only supports fetching applications from this application repository (object storage) and deploying to the runtime environment, e.g., enter `http://docs-repo.gd2.qingstor.com`. This example contains a sample Nginx application that will be automatically imported into the platform after creation, and can be done in the application template Deployment.
+ - HTTPS: readable, not writable, supports only getting applications from this application repository, supports deployment to a runtime environment.
+
+ - Description information: a brief description of the main features of the application repository to give users a better understanding of the application repository.
+
+3. If the validation is passed, click the **OK** button to complete the addition of the application repository. Once the repository is added, KubeSphere will automatically load all the application templates under the repository.
+
+> Note that the example repository added above is a mirror of Google's Helm repository (we will be developing a commercial version of the application repository for enterprise use in the future), and some of these applications may not be successfully deployed.
+
+In an on-premises private cloud scenario, you can build your own repository based on [Helm](https://helm.sh), and develop and upload applications to your repository that meet your business needs, and then deploy them for distribution based on KubeSphere.
diff --git a/content/en/docs/workspaces-administration/release-v211.md b/content/en/docs/workspaces-administration/release-v211.md
deleted file mode 100644
index d74285d36..000000000
--- a/content/en/docs/workspaces-administration/release-v211.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: "Import Helm Repository"
-keywords: "kubernetes, helm, kubesphere, application"
-description: "Import Helm Repository into KubeSphere"
-
-linkTitle: "Import Helm Repository"
-weight: 100
----
-
-TBD
diff --git a/content/en/docs/workspaces-administration/release-v300.md b/content/en/docs/workspaces-administration/release-v300.md
deleted file mode 100644
index dae816590..000000000
--- a/content/en/docs/workspaces-administration/release-v300.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: "Upload Helm-based Application"
-keywords: "kubernetes, helm, kubesphere, openpitrix, application"
-description: "Upload Helm-based Application"
-
-linkTitle: "Upload Helm-based Application"
-weight: 50
----
-
-TBD
diff --git a/content/en/docs/workspaces-administration/upload-helm-based-application.md b/content/en/docs/workspaces-administration/upload-helm-based-application.md
new file mode 100644
index 000000000..b37041d3d
--- /dev/null
+++ b/content/en/docs/workspaces-administration/upload-helm-based-application.md
@@ -0,0 +1,43 @@
+---
+title: "Upload Helm-based Application"
+keywords: "kubernetes, helm, kubesphere, openpitrix, application"
+description: "Upload Helm-based Application"
+
+linkTitle: "Upload Helm-based Application"
+weight: 50
+---
+
+KubeSphere provides full lifecycle management for applications. You can upload or create new app templates and test them quickly. In addition, you can publish your apps to App Store so that other users can deploy with one click. You can upload [Helm Chart](https://helm.sh/) to develop app templates.
+
+## Prerequisites
+
+- You need to create a workspace and `project-admin` account. Please refer to the [Getting Started with Multi-tenant Management](../../../quick-start/create-workspace-and-project) if not yet.
+- You need to sign in with `project-admin` account.
+
+## Hands-on Lab
+
+Go to the workspace, open `Apps Management` and go to `App Templates`, then click the `Create` button.
+
+
+
+Click the `Upload` button.
+
+
+
+You can use a Helm chart that you have already developed locally, or download the sample [Helm package](/files/application-templates/nginx-0.1.0.tgz) here.
+
+
+
+Select the Helm chart file you have finished developing locally and click `OK` to proceed to the next step.
+
+
+
+
+
+Now that you have successfully uploaded a Helm package, you can click on its name to go to its detail page.
+
+
+
+On the versions list tab, you can click on the corresponding version to test the deployment.
+
+
diff --git a/content/en/partner/_index.md b/content/en/partner/_index.md
index 846dcd10d..ac5adf2f0 100644
--- a/content/en/partner/_index.md
+++ b/content/en/partner/_index.md
@@ -26,7 +26,7 @@ section3:
tip: Apply now →
partnerType:
- title: "App Providers"
- content: "KubeSphere Application Store is a great place to showcase your applications. KubeSphere brings your applications to tens of thousands of users, allowing them to deploy your App to Kubernetes with one click."
+ content: "KubeSphere App Store is a great place to showcase your applications. KubeSphere brings your applications to tens of thousands of users, allowing them to deploy your App to Kubernetes with one click."
link: "request"
- title: "Technology"
diff --git a/content/en/reason/_index.md b/content/en/reason/_index.md
deleted file mode 100644
index e9ce638d8..000000000
--- a/content/en/reason/_index.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: "reason"
-
----
\ No newline at end of file
diff --git a/content/tr/partner/_index.md b/content/tr/partner/_index.md
index 73935b3be..f52e59bb2 100644
--- a/content/tr/partner/_index.md
+++ b/content/tr/partner/_index.md
@@ -21,19 +21,19 @@ section3:
tip: Request now →
partnerType:
- title: "App Providers"
- content: "KubeSphere Application Store is a great place to showcase your application, KubeSphere bring your applications to tens of thousands of users, making them deploy your App to Kubernetes with one click."
+ content: "KubeSphere App Store is a great place to showcase your application, KubeSphere bring your applications to tens of thousands of users, making them deploy your App to Kubernetes with one click."
link: ""
- title: "Consulting"
- content: "KubeSphere Application Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere Application Store now!"
+ content: "KubeSphere App Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere App Store now!"
link: ""
- title: "Cloud Providers"
- content: "KubeSphere Application Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere Application Store now!"
+ content: "KubeSphere App Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere App Store now!"
link: ""
- title: "Go-To-Market"
- content: "KubeSphere Application Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere Application Store now!"
+ content: "KubeSphere App Store is a great place to showcase your application, users can quickly deploy your application to Kubernetes using KubeSphere. Submit your application to KubeSphere App Store now!"
link: ""
section4:
diff --git a/content/zh/api/crd.md b/content/zh/api/crd.md
new file mode 100644
index 000000000..4cbf33361
--- /dev/null
+++ b/content/zh/api/crd.md
@@ -0,0 +1,8 @@
+---
+title: KubeSphere API Documents
+description: KubeSphere API Documents
+keywords: KubeSphere, KubeSphere Documents, Kubernetes
+
+swaggerUrl: json/crd.json
+---
+
diff --git a/content/zh/api/kubesphere.md b/content/zh/api/kubesphere.md
new file mode 100644
index 000000000..d5ef99133
--- /dev/null
+++ b/content/zh/api/kubesphere.md
@@ -0,0 +1,7 @@
+---
+title: KubeSphere API Documents
+description: KubeSphere API Documents
+keywords: KubeSphere, KubeSphere Documents, Kubernetes
+
+swaggerUrl: json/kubesphere.json
+---
\ No newline at end of file
diff --git a/content/zh/blogs/tidb-on-kbesphere-using-qke.md b/content/zh/blogs/tidb-on-kbesphere-using-qke.md
new file mode 100644
index 000000000..7bad64532
--- /dev/null
+++ b/content/zh/blogs/tidb-on-kbesphere-using-qke.md
@@ -0,0 +1,235 @@
+---
+title: 'KubeSphere 部署 TiDB 云原生分布式数据库'
+tag: 'TiDB, Kubernetes, KubeSphere, TiKV, prometheus'
+createTime: '2020-10-29'
+author: 'Will, FeynmanZhou, Yaqiong Liu'
+snapshot: 'https://ap3.qingstor.com/kubesphere-website/docs/20201028212049.png'
+---
+
+
+
+## TiDB 简介
+
+[TiDB](https://pingcap.com/) 是 PingCAP 公司自主设计、研发的开源分布式关系型数据库,具备水平扩容或者缩容、金融级高可用、实时 HTAP、云原生的分布式数据库、兼容 MySQL 5.7 协议和 MySQL 生态等重要特性。TiDB 适合高可用、强一致要求较高、数据规模较大等各种应用场景。
+
+
+
+
+## KubeSphere 简介
+
+[KubeSphere](https://kubesphere.io) 是在 Kubernetes 之上构建的以应用为中心的多租户容器平台,完全开源,支持多云与多集群管理,提供全栈的 IT 自动化运维的能力,简化企业的 DevOps 工作流。KubeSphere 提供了运维友好的向导式操作界面,帮助企业快速构建一个强大和功能丰富的容器云平台。
+
+
+
+## 部署环境准备
+
+KubeSphere 是由青云 QingCloud 开源的容器平台,**支持在任何基础设施上安装部署**。在青云公有云上支持一键部署 KubeSphere(QKE)。
+
+下面以在青云云平台快速启用 KubeSphere 容器平台为例部署 TiDB 分布式数据库,至少需要准备 3 个可调度的 node 节点。你也可以在任何 Kubernetes 集群或 Linux 系统上安装 KubeSphere,可以参考 [KubeSphere 官方文档](https://kubesphere.io/docs)。
+
+1. 登录青云控制台:[https://console.qingcloud.com/](https://console.qingcloud.com/),点击左侧容器平台,选择 KubeSphere,点击创建并选择合适的集群规格:
+
+
+
+2. 创建完成后登录到 KubeSphere 平台界面:
+
+
+
+3. 点击下方的 Web Kubectl 集群客户端命令行工具,连接到 Kubectl 命令行界面。执行以下命令安装 TiDB Operator CRD:
+
+```shell
+kubectl apply -f https://raw.githubusercontent.com/pingcap/TiDB-Operator/v1.1.6/manifests/crd.yaml
+```
+
+4. 执行后的返回结果如下:
+
+
+
+5. 点击左上角平台管理,选择访问控制,新建企业空间,这里命名为 `dev-workspace`
+
+
+
+6. 进入企业空间,选择应用仓库,添加一个 TiDB 的应用仓库:
+
+
+
+7. 将 PingCap 官方 Helm 仓库添加到 KubeSphere 容器平台,Helm 仓库地址如下:
+
+```shell
+https://charts.pingcap.org
+```
+
+8. 添加方式如下:
+
+
+
+## 部署 TiDB-Operator
+
+1. 首选创建一个项目(Namespace)用于运行 TiDB 集群:
+
+
+
+2. 创建完成后点击进入项目,选择应用,部署新应用
+
+
+
+3. 选择来自应用模板:
+
+
+
+
+4. 选择 `pingcap`,该仓库包含了多个 helm chart,当前主要部署 `TiDB-Operator` 和`tidb-cluster`。
+
+
+
+5. 点击 `TiDB-Operator` 进入 Chart 详情页,点击配置文件可查看或下载默认的 `values.yaml`,选择版本,点击部署:
+
+
+
+6. 配置应用名称并选择应用版本,确认应用部署位置:
+
+
+
+7. 继续下一步,该步骤可以在界面直接编辑 `values.yaml` 文件,自定义配置,当前保留默认即可:
+
+
+
+8. 点击部署,等待应用状态变为活跃:
+
+
+
+9. 点击工作负载(Deployment),查看 TiDB-Operator 部署了 2 个 Deployment 类型资源:
+
+
+
+
+## 部署 TiDB-Cluster
+
+1. TiDB-Operator 部署完成后,可以继续部署 TiDB-Cluster。与部署 TiDB-Operator 操作相同,选择左侧应用,点击 tidb-cluster:
+
+
+
+2. 切换到配置文件,选择版本,下载 `values.yaml`到本地:
+
+
+
+3. TiDB Cluster 中部分组件需要持久存储卷,青云公有云平台提供了以下几种类型的 StorageClass:
+
+```shell
+/ # kubectl get sc
+NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
+csi-high-capacity-legacy csi-qingcloud Delete Immediate true 101m
+csi-high-perf csi-qingcloud Delete Immediate true 101m
+csi-ssd-enterprise csi-qingcloud Delete Immediate true 101m
+csi-standard (default) csi-qingcloud Delete Immediate true 101m
+csi-super-high-perf csi-qingcloud Delete Immediate true 101m
+```
+
+4. 这里选择 csi-standard 类型,`values.yaml` 中的 `StorageClassName` 字段默认配置为 `local-storage`。因此,在下载的 yaml 文件中直接替换所有的 `local-storage` 字段为 `csi-standard`。在最后一步使用修改后的 `values.yaml` 覆盖应用配置文本框中的内容,当然也可以手动编辑配置文件逐个替换:
+
+
+
+5. 这里仅修改 `storageClassName` 字段用于引用外部持久存储,如果需要将 tidb、tikv或 pd 组件调度到独立节点,可参考 nodeAffinity 相关参数进行修改。点击部署,将 tidb cluster 部署到容器平台,最终在应用列表中可以看到如下 2 个应用:
+
+
+
+## 查看 TiDB 集群监控
+
+1. TiDB 集群部署后需要一定时间完成初始化,选择工作负载,查看 Deployment 无状态应用:
+
+
+
+2. 查看有状态副本集(StatefulSets),其中 tidb、tikv 和 pd 等组件都为有状态应用:
+
+
+
+3. 在 KubeSphere 监控面板查看 tidb 负载情况,可以看到 CPU、内存、网络流出速率有明显的变化:
+
+
+
+4. 在 KubeSphere 监控面板查看 TiKV 负载情况:
+
+
+
+
+5. 查看容器组(Pod)列表,tidb 集群包含了 3 个 pd、2 个 tidb 以及 3 个 tikv 组件:
+
+
+
+6. 点击存储管理,查看存储卷,其中 tikv 和 pd 这 2 个组件使用了持久化存储:
+
+
+
+7. 查看某个存储卷用量信息,以 tikv 为例,可以看到当前存储的存储容量和剩余容量等监控数据。
+
+
+
+8. 在 KubeSphere 项目首页查看 tidb-cluster 项目中资源用量排行:
+
+
+
+## 访问 TiDB 集群
+
+1. 点击左侧服务,查看 TiDB 集群创建和暴露的服务信息。
+
+
+
+2. 其中 TiDB 服务 4000 端口绑定的服务类型为nodeport,直接可以在集群外通过 nodeIP 访问。测试使用 MySQL 客户端连接数据库。
+
+```shell
+[root@k8s-master1 ~]# docker run -it --rm mysql bash
+
+[root@0d7cf9d2173e:/# mysql -h 192.168.1.102 -P 32682 -u root
+Welcome to the MySQL monitor. Commands end with ; or \g.
+Your MySQL connection id is 201
+Server version: 5.7.25-TiDB-v4.0.6 TiDB Server (Apache License 2.0) Community Edition, MySQL 5.7 compatible
+
+Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+
+Oracle is a registered trademark of Oracle Corporation and/or its
+affiliates. Other names may be trademarks of their respective
+owners.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+mysql> show databases;
++--------------------+
+| Database |
++--------------------+
+| INFORMATION_SCHEMA |
+| METRICS_SCHEMA |
+| PERFORMANCE_SCHEMA |
+| mysql |
+| test |
++--------------------+
+5 rows in set (0.01 sec)
+
+mysql>
+```
+
+## 查看 Grafana 监控面板
+
+另外,TiDB 自带了 Prometheus 和 Grafana,用于数据库集群的性能监控,可以看到Grafana 界面的 Serivce 3000 端口同样绑定了 NodePort 端口。访问 Grafana UI,查看某个指标:
+
+
+
+
+## 总结
+
+KubeSphere 容器平台对于云原生应用部署非常友好,对于还不熟悉 Kubernetes 的应用开发者而又希望通过在界面简单配置完成 TiDB 集群的部署,可以参考以上步骤快速上手。我们将在下一期的文章中,为大家分享另一种部署玩法:将 TiDB 应用上架到 KubeSphere 应用商店实现真正的一键部署。
+
+另外,TiDB 还可以结合 KubeSphere 的多集群联邦功能,部署 TiDB 应用时可一键分发 TiDB 不同的组件副本到不同基础设施环境的多个 Kubernetes 集群,实现跨集群、跨区域的高可用。如果大家感兴趣,我们将在后续的文章中为大家分享 TiDB 在 KubeSphere 实现多集群联邦的混合云部署架构。
+
+## 参考
+
+**KubeSphere GitHub**: https://github.com/kubesphere/kubesphere
+
+**TiDB GitHub**: https://github.com/pingcap/TiDB
+
+**TiDB Operator 快速入门**: https://github.com/pingcap/docs-TiDB-Operator/blob/master/zh/get-started.md
+
+**TiDB-Operator 文档**: https://docs.pingcap.com/tidb-in-kubernetes/stable/TiDB-Operator-overview
+
+**KubeSphere Introduction**: https://kubesphere.io/docs/introduction/what-is-kubesphere/
+
+**KubeSphere Documentation**: https://kubesphere.io/docs/
diff --git a/content/zh/case/vng.md b/content/zh/case/vng.md
index fc43d3c66..cd30ac320 100644
--- a/content/zh/case/vng.md
+++ b/content/zh/case/vng.md
@@ -6,7 +6,7 @@ css: scss/case-detail.scss
section1:
title: VNG
- content: VNG Coporation 是越南领先的互联网科技公司。在 2014 年,我们被评为越南唯一一家估值 10 亿美元的创业公司。VNG 推出了许多重要产品,比如 Zalo、ZaloPay 和 Zing 等,吸引了数亿用户。
+ content: VNG Corporation 是越南领先的互联网科技公司。在 2014 年,我们被评为越南唯一一家估值 10 亿美元的创业公司。VNG 推出了许多重要产品,比如 Zalo、ZaloPay 和 Zing 等,吸引了数亿用户。
image: https://pek3b.qingstor.com/kubesphere-docs/png/20200619222719.png
section2:
diff --git a/content/zh/contribution/_index.md b/content/zh/contribution/_index.md
index 1088e0cda..eaebdcb3b 100644
--- a/content/zh/contribution/_index.md
+++ b/content/zh/contribution/_index.md
@@ -15,9 +15,9 @@ section2:
icon2: 'images/contribution/37.png'
children:
- content: 'Download KubeSphere'
- link: 'https://kubesphere.io/docs/installation/intro/'
+ link: '../../../zh/docs/quick-start/all-in-one-on-linux/'
- content: 'Quickstart'
- link: 'https://kubesphere.io/docs/quick-start/admin-quick-start/'
+ link: '../../../zh/docs/quick-start/create-workspace-and-project/'
- content: 'Tutorial Videos'
link: '../videos'
@@ -84,7 +84,7 @@ section3:
- name: 'Apps'
icon: '/images/contribution/apps.svg'
iconActive: '/images/contribution/apps-active.svg'
- content: 'App charts for the built-in Application Store'
+ content: 'App charts for the built-in App Store'
link: 'https://github.com/kubesphere/community/tree/master/sig-apps'
linkContent: 'Join SIG - Apps →'
children:
@@ -92,7 +92,7 @@ section3:
- icon: '/images/contribution/calicq2.jpg'
- icon: '/images/contribution/calicq3.jpg'
- - name: 'Application Store'
+ - name: 'App Store'
icon: '/images/contribution/app-store.svg'
iconActive: '/images/contribution/app-store-active.svg'
content: 'App Store, App template management'
diff --git a/content/zh/docs/access-control-and-account-management/_index.md b/content/zh/docs/access-control-and-account-management/_index.md
new file mode 100644
index 000000000..a7cdc5aee
--- /dev/null
+++ b/content/zh/docs/access-control-and-account-management/_index.md
@@ -0,0 +1,11 @@
+---
+title: "账户管理和权限控制"
+description: "账户管理和权限控制"
+layout: "single"
+
+linkTitle: "账户管理和权限控制"
+weight: 4500
+
+icon: "/images/docs/docs.svg"
+
+---
diff --git a/content/zh/docs/access-control-and-account-management/oauth2.md b/content/zh/docs/access-control-and-account-management/oauth2.md
new file mode 100644
index 000000000..de638dfaf
--- /dev/null
+++ b/content/zh/docs/access-control-and-account-management/oauth2.md
@@ -0,0 +1,129 @@
+---
+title: "OAuth2 Identity Provider"
+keywords: 'kubernetes, kubesphere, OAuth2, Identity Provider'
+description: 'OAuth2 Identity Provider'
+
+weight: 2240
+---
+
+## 概览
+
+KubeSphere 可以通过标准的 OAuth2 协议对接外部的 OAuth2 Provider,通过外部 OAuth2 Server 完成账户认证后可以关联登录到 KubeSphere。
+完整的认证流程如下:
+
+
+
+## GitHubIdentityProvider
+
+KubeSphere 默认提供了 GitHubIdentityProvider 作为 OAuth2 认证插件的开发示例,配置及使用方式如下:
+
+### 参数配置
+
+IdentityProvider 的参数通过 kubesphere-system 项目下 kubesphere-config 这个 ConfigMap 进行配置
+
+通过 `kubectl -n kubesphere-system edit cm kubesphere-config` 进行编辑,配置示例:
+
+```yaml
+apiVersion: v1
+data:
+ kubesphere.yaml: |
+ authentication:
+ authenticateRateLimiterMaxTries: 10
+ authenticateRateLimiterDuration: 10m0s
+ loginHistoryRetentionPeriod: 7d
+ maximumClockSkew: 10s
+ multipleLogin: true
+ kubectlImage: kubesphere/kubectl:v1.0.0
+ jwtSecret: "jwt secret"
+ oauthOptions:
+ accessTokenMaxAge: 1h
+ accessTokenInactivityTimeout: 30m
+ identityProviders:
+ - name: github
+ type: GitHubIdentityProvider
+ mappingMethod: mixed
+ provider:
+ clientID: 'Iv1.547165ce1cf2f590'
+ clientSecret: 'c53e80ab92d48ab12f4e7f1f6976d1bdc996e0d7'
+ endpoint:
+ authURL: 'https://github.com/login/oauth/authorize'
+ tokenURL: 'https://github.com/login/oauth/access_token'
+ redirectURL: 'https://ks-console/oauth/redirect'
+ scopes:
+ - user
+ ...
+```
+
+在 `authentication.oauthOptions.identityProviders` 下增加 GitHubIdentityProvider 的配置块,参数示意:
+
+| 字段 | 说明 |
+|-----------|-------------|
+| name | IdentityProvider 的唯一名称 |
+| type | IdentityProvider 插件的类型,GitHubIdentityProvider 是一种默认实现的类型 |
+| mappingMethod | 账户关联配置,详细说明: https://github.com/kubesphere/kubesphere/blob/master/pkg/apiserver/authentication/oauth/oauth_options.go#L37-L44 |
+| clientID | OAuth2 client ID |
+| clientSecret | OAuth2 client secret |
+| authURL | OAuth2 endpoint |
+| tokenURL | OAuth2 endpoint |
+| redirectURL | 重定向到 ks-console 的跳转路径`https://ks-console/oauth/redirect` |
+
+重启 ks-apiserver 以更新配置: `kubectl -n kubesphere-system rollout restart deploy ks-apiserver`,重启完成后打开前端页面可以看到通过 `通过 github 登录` 按钮
+
+
+
+### 通过 Github 账户登录 KubeSphere
+
+
+
+
+
+账户登录到 KubeSphere 之后就可以被添加、邀请到企业空间中[参与项目协同](https://kubesphere.io/docs/workspaces-administration/role-and-member-management)。
+
+## OAuth2 插件开发
+
+OAuth2 作为一个开放协议,解决了 API 认证授权的问题,进行账户接入还需要对用户信息接口和字段进行适配,您可以参照 [GitHubIdentityProvider](https://github.com/kubesphere/kubesphere/blob/master/pkg/apiserver/authentication/identityprovider/github/github.go) 、 [AliyunIDaasProvider](https://github.com/kubesphere/kubesphere/blob/master/pkg/apiserver/authentication/identityprovider/aliyunidaas/idaas.go) 这两个插件进行开发,以接入您私有的账户体系。
+
+插件开发流程:
+
+### 实现 `OAuthProvider` 接口
+
+```go
+type OAuthProvider interface {
+ Type() string
+ Setup(options *oauth.DynamicOptions) (OAuthProvider, error)
+ IdentityExchange(code string) (Identity, error)
+}
+```
+
+插件通过 kubesphere-config 中 `authentication.oauthOptions.identityProviders` 部分进行配置,其中 provider 是动态配置, 也就是插件中的 `*oauth.DynamicOptions`。
+
+### 插件注册
+
+注册插件
+
+`pkg/apiserver/authentication/identityprovider/github/github.go`
+
+```go
+func init() {
+ identityprovider.RegisterOAuthProvider(&Github{})
+}
+```
+
+启用插件
+
+`/pkg/apiserver/authentication/options/authenticate_options.go`
+
+```go
+import (
+ "fmt"
+ "github.com/spf13/pflag"
+ _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/aliyunidaas"
+ _ "kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider/github"
+ "kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
+ "time"
+)
+```
+
+### 构建镜像
+
+[构建 ks-apiserver 的镜像](https://github.com/kubesphere/community/blob/104bab42f67094930f2ca87c603b7c6365cd092a/developer-guide/development/quickstart.md) 后部署到您的集群中,参照 GitHubIdentityProvider 的使用流程启用您新开发的插件。
diff --git a/content/zh/docs/application-store/_index.md b/content/zh/docs/application-store/_index.md
index bc9c43c71..a604a5a40 100644
--- a/content/zh/docs/application-store/_index.md
+++ b/content/zh/docs/application-store/_index.md
@@ -1,9 +1,9 @@
---
-title: "Application Store"
+title: "App Store"
description: "Getting started with KubeSphere DevOps project"
layout: "single"
-linkTitle: "Application Store"
+linkTitle: "App Store"
weight: 4500
icon: "/images/docs/docs.svg"
diff --git a/content/zh/docs/cluster-administration/cluster-settings/_index.md b/content/zh/docs/cluster-administration/cluster-settings/_index.md
new file mode 100644
index 000000000..89e5f2112
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "Cluster Settings"
+weight: 4180
+
+_build:
+ render: false
+---
diff --git a/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md b/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md
new file mode 100644
index 000000000..abb194b58
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/cluster-visibility-and-authorization.md
@@ -0,0 +1,54 @@
+---
+title: "Cluster Visibility and Authorization"
+keywords: "Cluster Visibility, Cluster Management"
+description: "Cluster Visibility"
+
+linkTitle: "Cluster Visibility and Authorization"
+weight: 200
+---
+
+## Objective
+This guide demonstrates how to set up cluster visibility. You can limit which clusters a workspace can use with cluster visibility settings.
+
+## Prerequisites
+* You need to enable [Multi-cluster Management](/docs/multicluster-management/enable-multicluster/direct-connection/).
+* You need to create at least one workspace.
+
+## Set cluster visibility
+
+In KubeSphere, clusters can be authorized to multiple workspaces, and workspaces can also be associated with multiple clusters.
+
+### Set up available clusters when creating workspace
+
+1. Log in to an account that has permission to create a workspace, such as `ws-manager`.
+2. Open the **Platform** menu to enter the **Access Control** page, and then enter the **Workspaces** list page from the sidebar.
+3. Click the **Create** button.
+4. Fill in the form and click the **Next** button.
+5. Then you can see a list of clusters, and you can check to set which clusters workspace can use.
+
+6. After the workspace is created, the members of the workspace can use the resources in the associated cluster.
+
+
+{{< notice warning >}}
+
+Please try not to create resources on the host cluster to avoid excessive loads, which can lead to a decrease in the stability across clusters.
+
+{{ notice >}}
+
+### Set cluster visibility after the workspace is created
+
+After the workspace is created, you can also add or cancel the cluster authorization. Please follow the steps below to adjust the visibility of a cluster.
+
+1. Log in to an account that has permission to manage clusters, such as `cluster-manager`.
+2. Open the **Platform** menu to enter the **Clusters Management** page, and then click a cluster to enter its **Cluster Management** page.
+3. Expand the **Cluster Settings** sidebar and click on the **Cluster Visibility** menu.
+4. You can see the list of authorized workspaces.
+5. Click the **Edit Visibility** button to set the cluster authorization scope by adjusting the position of the workspace in the **Authorized/Unauthorized** list.
+
+
+
+### Public cluster
+
+You can check **Set as public cluster** when setting cluster visibility.
+
+A public cluster means all platform users can access the cluster, in which they are able to create and schedule resources.
diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md
new file mode 100644
index 000000000..d445368ee
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "Log collection"
+weight: 2000
+
+_build:
+ render: false
+---
\ No newline at end of file
diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md
new file mode 100644
index 000000000..08cbc6fb2
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-es-as-receiver.md
@@ -0,0 +1,37 @@
+---
+title: "Add Elasticsearch as receiver (aka Collector)"
+keywords: 'kubernetes, log, elasticsearch, pod, container, fluentbit, output'
+description: 'Add Elasticsearch as log receiver to receive container logs'
+
+linkTitle: "Add Elasticsearch as Receiver"
+weight: 2200
+---
+KubeSphere supports using Elasticsearch, Kafka and Fluentd as log receivers.
+This doc will demonstrate how to add an Elasticsearch receiver.
+
+## Prerequisite
+
+Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components following [Enable Pluggable Components](https://kubesphere.io/docs/pluggable-components/). The `logging` component is enabled as an example in this doc.
+
+1. To add a log receiver:
+
+- Login KubeSphere with an account of ***platform-admin*** role
+- Click ***Platform*** -> ***Clusters Management***
+- Select a cluster if multiple clusters exist
+- Click ***Cluster Settings*** -> ***Log Collections***
+- Log receivers can be added by clicking ***Add Log Collector***
+
+
+
+2. Choose ***Elasticsearch*** and fill in the Elasticsearch service address and port like below:
+
+
+
+3. Elasticsearch appears in the receiver list of ***Log Collections*** page and its status becomes ***Collecting***.
+
+
+
+4. Verify whether Elasticsearch is receiving logs sent from Fluent Bit:
+
+- Click ***Log Search*** in the ***Toolbox*** in the bottom right corner.
+- You can search logs in the logging console that appears.
diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
new file mode 100644
index 000000000..278fbe438
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-fluentd-as-receiver.md
@@ -0,0 +1,155 @@
+---
+title: "Add Fluentd as Receiver (aka Collector)"
+keywords: 'kubernetes, log, fluentd, pod, container, fluentbit, output'
+description: 'Add Fluentd as log receiver to receive container logs'
+
+linkTitle: "Add Fluentd as Receiver"
+weight: 2400
+---
+KubeSphere supports using Elasticsearch, Kafka and Fluentd as log receivers.
+This doc will demonstrate:
+
+- How to deploy Fluentd as deployment and create corresponding service and configmap.
+- How to add Fluentd as a log receiver to receive logs sent from Fluent Bit and then output to stdout.
+- How to verify if Fluentd receives logs successfully.
+
+## Prerequisites
+
+- Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components following [Enable Pluggable Components](https://kubesphere.io/docs/pluggable-components/). The `logging` component is enabled as an example in this doc.
+- To configure log collection, you should use an account of ***platform-admin*** role.
+
+## Step 1: Deploy Fluentd as a deployment
+
+Usually, Fluentd is deployed as a daemonset in K8s to collect container logs on each node. KubeSphere chooses Fluent Bit for this purpose because of its low memory footprint. Besides, Fluentd features numerous output plugins. Hence, KubeSphere chooses to deploy Fluentd as a deployment to forward logs it receives from Fluent Bit to more destinations such as S3, MongoDB, Cassandra, MySQL, syslog and Splunk.
+
+To deploy Fluentd as a deployment, you simply need to open the ***kubectl*** console in ***KubeSphere Toolbox*** and run the following command:
+
+{{< notice note >}}
+
+- The following command will deploy Fluentd deployment, service and configmap into the `default` namespace and add a filter to Fluentd configmap to exclude logs from the `default` namespace to avoid Fluent Bit and Fluentd loop logs collection.
+- You'll need to change all these `default` to the namespace you selected if you want to deploy to a different namespace.
+
+{{</ notice >}}
+
+```yaml
+cat <
+ @type forward
+ port 24224
+
+
+ # Because this will send logs Fluentd received to stdout,
+ # to avoid Fluent Bit and Fluentd loop logs collection,
+ # add a filter here to avoid sending logs from the default namespace to stdout again
+
+ @type grep
+
+ key $.kubernetes.namespace_name
+ pattern /^default$/
+
+
+
+ # Send received logs to stdout for demo/test purpose only
+ # Various output plugins are supported to output logs to S3, MongoDB, Cassandra, MySQL, syslog and Splunk etc.
+
+ @type stdout
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: fluentd
+ name: fluentd
+ namespace: default
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: fluentd
+ template:
+ metadata:
+ labels:
+ app: fluentd
+ spec:
+ containers:
+ - image: fluentd:v1.9.1-1.0
+ imagePullPolicy: IfNotPresent
+ name: fluentd
+ ports:
+ - containerPort: 24224
+ name: forward
+ protocol: TCP
+ - containerPort: 5140
+ name: syslog
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /fluentd/etc
+ name: config
+ readOnly: true
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: fluentd-config
+ name: config
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: fluentd-svc
+ name: fluentd-svc
+ namespace: default
+spec:
+ ports:
+ - name: forward
+ port: 24224
+ protocol: TCP
+ targetPort: forward
+ selector:
+ app: fluentd
+ sessionAffinity: None
+ type: ClusterIP
+EOF
+```
+
+## Step 2: Add Fluentd as log receiver (aka collector)
+
+1. To add a log receiver:
+
+- Login KubeSphere with an account of ***platform-admin*** role
+- Click ***Platform*** -> ***Clusters Management***
+- Select a cluster if multiple clusters exist
+- Click ***Cluster Settings*** -> ***Log Collections***
+- Log receivers can be added by clicking ***Add Log Collector***
+
+
+
+2. Choose ***Fluentd*** and fill in the Fluentd service address and port like below:
+
+
+
+3. Fluentd appears in the receiver list of ***Log Collections*** UI and its status shows ***Collecting***.
+
+
+
+
+4. Verify whether Fluentd is receiving logs sent from Fluent Bit:
+
+- Click ***Application Workloads*** in the ***Cluster Management*** UI.
+- Select ***Workloads*** and then select the `default` namespace in the ***Workload*** - ***Deployments*** tab
+- Click the ***fluentd*** item and then click the ***fluentd-xxxxxxxxx-xxxxx*** pod
+- Click the ***fluentd*** container
+- In the ***fluentd*** container page, select the ***Container Logs*** tab
+
+You'll see logs begin to scroll up continuously.
+
+
\ No newline at end of file
diff --git a/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
new file mode 100644
index 000000000..d2aa50fae
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/log-collections/add-kafka-as-receiver.md
@@ -0,0 +1,133 @@
+---
+title: "Add Kafka as Receiver (aka Collector)"
+keywords: 'kubernetes, log, kafka, pod, container, fluentbit, output'
+description: 'Add Kafka as log receiver to receive container logs'
+
+linkTitle: "Add Kafka as Receiver"
+weight: 2300
+---
+KubeSphere supports using Elasticsearch, Kafka and Fluentd as log receivers.
+This doc will demonstrate:
+
+- Deploy [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) and then create a Kafka cluster and a Kafka topic by creating `Kafka` and `KafkaTopic` CRDs.
+- Add Kafka log receiver to receive logs sent from Fluent Bit
+- Verify whether the Kafka cluster is receiving logs using [Kafkacat](https://github.com/edenhill/kafkacat)
+
+## Prerequisite
+
+Before adding a log receiver, you need to enable any of the `logging`, `events` or `auditing` components following [Enable Pluggable Components](https://kubesphere.io/docs/pluggable-components/). The `logging` component is enabled as an example in this doc.
+
+## Step 1: Create a Kafka cluster and a Kafka topic
+
+{{< notice note >}}
+
+If you already have a Kafka cluster, you can start from Step 2.
+
+{{</ notice >}}
+
+You can use [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) to create a Kafka cluster and a Kafka topic
+
+1. Install [strimzi-kafka-operator](https://github.com/strimzi/strimzi-kafka-operator) to the `default` namespace:
+
+```bash
+helm repo add strimzi https://strimzi.io/charts/
+helm install --name kafka-operator -n default strimzi/strimzi-kafka-operator
+```
+
+2. Create a Kafka cluster and a Kafka topic in the `default` namespace:
+
+To deploy a Kafka cluster and create a Kafka topic, you simply need to open the ***kubectl*** console in ***KubeSphere Toolbox*** and run the following command:
+
+{{< notice note >}}
+
+The following will create Kafka and Zookeeper clusters with storage type `ephemeral` which is `emptydir` for demo purpose. You should use other storage types for production, please refer to [kafka-persistent](https://github.com/strimzi/strimzi-kafka-operator/blob/0.19.0/examples/kafka/kafka-persistent.yaml)
+
+{{</ notice >}}
+
+```yaml
+cat < ***Clusters Management***
+- Select a cluster if multiple clusters exist
+- Click ***Cluster Settings*** -> ***Log Collections***
+- Log receivers can be added by clicking ***Add Log Collector***
+
+
+
+{{< notice note >}}
+
+- At most one receiver can be added for each receiver type.
+- Different types of receivers can be added simultaneously.
+
+{{</ notice >}}
+
+### Add Elasticsearch as log receiver
+
+A default Elasticsearch receiver will be added with its service address set to an Elasticsearch cluster if logging/events/auditing is enabled in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md)
+
+An internal Elasticsearch cluster will be deployed into K8s cluster if neither ***externalElasticsearchUrl*** nor ***externalElasticsearchPort*** are specified in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md) when logging/events/auditing is enabled.
+
+Configuring an external Elasticsearch cluster is recommended for production usage, the internal Elasticsearch cluster is for test/development/demo purpose only.
+
+Log searching relies on the internal/external Elasticsearch cluster configured.
+
+Please refer to [Add Elasticsearch as receiver](../add-es-as-receiver) to add a new Elasticsearch log receiver if the default one is deleted.
+
+### Add Kafka as log receiver
+
+Kafka is often used to receive logs and serve as a broker to other processing systems like Spark. [Add Kafka as receiver](../add-kafka-as-receiver) demonstrates how to add Kafka to receive Kubernetes logs.
+
+### Add Fluentd as log receiver
+
+If you need to output logs to more places other than Elasticsearch or Kafka, you'll need to add Fluentd as a log receiver. Fluentd has numerous output plugins which can forward logs to various destinations like S3, MongoDB, Cassandra, MySQL, syslog, Splunk etc. [Add Fluentd as receiver](../add-fluentd-as-receiver) demonstrates how to add Fluentd to receive Kubernetes logs.
+
+## Add Log Receiver (aka Collector) for events/auditing logs
+
+Starting from KubeSphere v3.0.0, K8s events logs and K8s/KubeSphere auditing logs can be archived in the same way as container logs. There will be ***Events*** or ***Auditing*** tab in the ***Log Collections*** page if ***events*** or ***auditing*** component is enabled in [ClusterConfiguration](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md). Log receivers for K8s events or K8s/KubeSphere auditing can be configured after switching to the corresponding tab.
+
+
+
+Container logs, K8s events and K8s/KubeSphere auditing logs should be stored in different Elasticsearch indices to be searched in KubeSphere, the index prefixes are:
+
+- ***ks-logstash-log*** for container logs
+- ***ks-logstash-events*** for K8s events
+- ***ks-logstash-auditing*** for K8s/KubeSphere auditing
+
+## Turn a log receiver on or off
+
+KubeSphere supports turning a log receiver on or off without adding/deleting it.
+To turn a log receiver on or off:
+
+- Click a log receiver and enter the receiver details page.
+- Click ***More*** -> ***Change Status***
+
+
+
+- You can select ***Activate*** or ***Close*** to turn the log receiver on or off
+
+
+
+- Log receiver's status will be changed to ***Close*** if you turn it off, otherwise the status will be ***Collecting***
+
+
+
+## Modify or delete a log receiver
+
+You can modify a log receiver or delete it:
+
+- Click a log receiver and enter the receiver details page.
+- You can edit a log receiver by clicking ***Edit*** or ***Edit Yaml***
+
+
+
+- Log receiver can be deleted by clicking ***Delete Log Collector***
diff --git a/content/zh/docs/cluster-administration/cluster-settings/mail-server.md b/content/zh/docs/cluster-administration/cluster-settings/mail-server.md
new file mode 100644
index 000000000..32fe01884
--- /dev/null
+++ b/content/zh/docs/cluster-administration/cluster-settings/mail-server.md
@@ -0,0 +1,34 @@
+---
+title: "邮件服务器"
+keywords: 'KubeSphere, Kubernetes, Notification, Mail Server'
+description: '邮件服务器'
+
+linkTitle: "邮件服务器"
+weight: 4190
+---
+
+## 目标
+
+本指南演示了告警策略的电子邮件通知设置(支持自定义设置)。 您可以指定用户电子邮件地址以接收告警消息。
+
+## 前提条件
+
+[KubeSphere Alerting and Notification](../../../pluggable-components/alerting-notification/) 需要启用。
+
+## 动手实验室
+
+1. 使用具有 `platform-admin` 角色的一个帐户登录 Web 控制台。
+2. 点击左上角的平台管理,然后选择集群管理。
+
+
+
+3. 从列表中选择一个集群并输入它(如果您未启用[多集群功能](../../../multicluster-management/),则将直接转到**概述**页面)。
+4. 在**群集设置**下选择**邮件服务器**。 在页面中,提供您的邮件服务器配置和 SMTP 身份验证信息,如下所示:
+ - **SMTP 服务器地址**:填写可以提供邮件服务的 SMTP 服务器地址。 端口通常是 25。
+ - **使用 SSL 安全连接**:SSL 可用于加密邮件,从而提高了邮件传输信息的安全性。 通常,您必须为邮件服务器配置证书。
+ - SMTP 验证信息:如下填写 **SMTP 用户**,**SMTP 密码**,**发件人电子邮件地址**等
+
+
+
+5. 完成上述设置后,单击**保存**。 您可以发送测试电子邮件以验证服务器配置是否成功。
+
diff --git a/content/zh/docs/installing-on-linux/_index.md b/content/zh/docs/installing-on-linux/_index.md
index 912e60db1..8df7d3a31 100644
--- a/content/zh/docs/installing-on-linux/_index.md
+++ b/content/zh/docs/installing-on-linux/_index.md
@@ -9,13 +9,13 @@ weight: 2000
icon: "/images/docs/docs.svg"
---
-本章演示如何使用 KubeKey 在 Linux 上配置生产可用的 Kubernetes 和 KubeSphere 集群。 你还可以使用 KubeKey 轻松对集群扩缩容,并根据需要设置各种存储类。
+本章演示如何使用 KubeKey 在 Linux 上配置生产可用的 Kubernetes 和 KubeSphere 集群。您还可以使用 KubeKey 轻松对集群扩缩容,并根据需要设置各种存储类。
## 介绍
-### [总览](../installing-on-linux/introduction/intro/)
+### [概述](../installing-on-linux/introduction/intro/)
-本章的大致内容包括安装准备,安装工具和方法以及存储设置。
+浏览本章的概述,包括安装准备、安装工具和方法以及存储设置。
### [多节点安装](../installing-on-linux/introduction/multioverview/)
@@ -30,25 +30,25 @@ icon: "/images/docs/docs.svg"
### [持久化存储配置](../installing-on-linux/introduction/storage-configuration/)
-使用 KubeKey 将不同的存储类添加到集群, 比如 Ceph RBD 和 Glusterfs。
+使用 KubeKey 将不同的存储类添加到集群,例如 Ceph RBD 和 Glusterfs。
## 在本地环境中安装 KubeSphere
-### [在VMware vSphere 中部署](../installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere/)
+### [在 VMware vSphere 上部署](../installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere/)
了解如何在 VMware vSphere 上创建高可用 KubeSphere 群集。
## 在公有云上安装 KubeSphere
-### [在 Azure 虚拟机中部署 KubeSphere](../installing-on-linux/public-cloud/install-ks-on-azure-vms/)
+### [在 Azure 虚拟机上部署 KubeSphere](../installing-on-linux/public-cloud/install-ks-on-azure-vms/)
了解如何在 Azure 虚拟机上创建高可用 KubeSphere 群集。
-### [在 QingCloud 虚拟机上部署 KubeSphere](../installing-on-linux/public-cloud/kubesphere-on-qingcloud-instance/)
+### [在青云QingCloud 虚拟机上部署 KubeSphere](../installing-on-linux/public-cloud/kubesphere-on-qingcloud-instance/)
-了解如何在 QingCloud 平台上创建高可用 KubeSphere 集群。
+了解如何在青云QingCloud 平台上创建高可用 KubeSphere 集群。
-## 运维你的集群
+## 添加或删除节点
### [添加新节点](../installing-on-linux/cluster-operation/add-new-nodes/)
@@ -56,7 +56,7 @@ icon: "/images/docs/docs.svg"
### [移除节点](../installing-on-linux/cluster-operation/remove-nodes/)
-cordon 节点,或者删除节点以缩小集群规模。
+停止调度节点,或者删除节点以缩小集群规模。
## 卸载
@@ -66,12 +66,12 @@ cordon 节点,或者删除节点以缩小集群规模。
## FAQ
-### [配置 Booster](../installing-on-linux/faq/configure-booster/)
+### [为安装配置加速器](../installing-on-linux/faq/configure-booster/)
配置镜像仓库以加快安装速度。
-## 更多信息
+## 常用指南
-在下面的章节中,你将找到那些最受欢迎的页面。强烈建议你先参考一下。
+以下是本章节中的常用指南,建议您优先参考。
{{< popularPage icon="/images/docs/qingcloud-2.svg" title="Deploy KubeSphere on QingCloud" description="Provision an HA KubeSphere cluster on QingCloud." link="../installing-on-linux/public-cloud/kubesphere-on-qingcloud-instance/" >}}
diff --git a/content/zh/docs/installing-on-linux/introduction/storage-configuration.md b/content/zh/docs/installing-on-linux/introduction/storage-configuration.md
index 2bb8cce63..fbf01e324 100644
--- a/content/zh/docs/installing-on-linux/introduction/storage-configuration.md
+++ b/content/zh/docs/installing-on-linux/introduction/storage-configuration.md
@@ -1,21 +1,75 @@
---
title: "Persistent Storage Configuration"
-keywords: 'kubernetes, docker, kubesphere, storage, volume, PVC'
+keywords: 'Kubernetes, docker, KubeSphere, storage, volume, PVC, KubeKey, add-on'
description: 'Persistent Storage Configuration'
linkTitle: "Persistent Storage Configuration"
weight: 2140
---
-## Overview
-Persistence volume is **Must** for Kubesphere. So before installation of Kubesphere, one **default**
-[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) and corresponding storage plugin should be installed on the Kubernetes cluster.
-As different users may choose different storage plugin, [KubeKey](https://github.com/kubesphere/kubekey) supports to install storage plugin by the way of
-[add-on](https://github.com/kubesphere/kubekey/blob/v1.0.0/docs/addons.md). This passage will introduce add-on configuration for some mainly used storage plugin.
-## QingCloud-CSI
-[QingCloud-CSI](https://github.com/yunify/qingcloud-csi) plugin implements an interface between CSI-enabled Container Orchestrator (CO) and the disk of QingCloud.
-Here is a helm-chart example of installing by KubeKey add-on.
-```bash
+## Overview
+Persistent volumes are a **Must** for installing KubeSphere. [KubeKey](https://github.com/kubesphere/kubekey) lets KubeSphere be installed on different storage systems by the [add-on mechanism](https://github.com/kubesphere/kubekey/blob/v1.0.0/docs/addons.md). General steps of installing KubeSphere by KubeKey on Linux are:
+
+1. Install Kubernetes.
+2. Install the **add-on** plugin for KubeSphere.
+3. Install KubeSphere by [ks-installer](https://github.com/kubesphere/ks-installer).
+
+In KubeKey configurations, `spec.persistence.storageClass` of `ClusterConfiguration` needs to be set for ks-installer to create a PersistentVolumeClaim (PVC) for KubeSphere. If it is empty, the **default StorageClass** (annotation `storageclass.kubernetes.io/is-default-class` equals to `true`) will be used.
+``` yaml
+apiVersion: installer.kubesphere.io/v1alpha1
+kind: ClusterConfiguration
+spec:
+ persistence:
+ storageClass: ""
+...
+```
+
+Therefore, an available StorageClass **must** be installed in Step 2 above. It includes:
+- StorageClass itself
+- Storage Plugin for the StorageClass if necessary
+
+This tutorial introduces **KubeKey add-on configurations** for some mainly used storage plugins. If `spec.persistence.storageClass` is empty, the default StorageClass will be installed. Refer to the following sections if you want to configure other storage systems.
+
+## QingCloud CSI
+If you plan to install KubeSphere on [QingCloud](https://www.qingcloud.com/), [QingCloud CSI](https://github.com/yunify/qingcloud-csi) can be chosen as the underlying storage plugin. The following is an example of KubeKey add-on configurations for QingCloud CSI installed by **Helm Charts including a StorageClass**.
+
+### Chart Config
+```yaml
+config:
+ qy_access_key_id: "MBKTPXWCIRIEDQYQKXYL" # Replace it with your own key id.
+ qy_secret_access_key: "cqEnHYZhdVCVif9qCUge3LNUXG1Cb9VzKY2RnBdX" # Replace it with your own access key.
+ zone: "pek3a" # Lowercase letters only.
+sc:
+ isDefaultClass: true # Set it as the default storage class.
+```
+You need to create this file of chart configurations and input the values above manually.
+
+#### Key
+
+To get values for `qy_access_key_id` and `qy_secret_access_key`, log in to the web console of [QingCloud](https://console.qingcloud.com/login) and refer to the image below to create a key first. Download the key after it is created, which is stored in a csv file.
+
+
+
+#### Zone
+
+The field `zone` specifies where your cloud volumes are deployed. On QingCloud Platform, you must select a zone before you create volumes.
+
+
+
+Make sure the value you specify for `zone` matches the region ID below:
+
+| Zone | Region ID |
+| ------------------------------------------- | ----------------------- |
+| Shanghai1-A/Shanghai1-B | sh1a/sh1b |
+| Beijing3-A/Beijing3-B/Beijing3-C/Beijing3-D | pek3a/pek3b/pek3c/pek3d |
+| Guangdong2-A/Guangdong2-B | gd2a/gd2b |
+| Asia-Pacific 2-A | ap2a |
+
+If you want to configure more values, see [chart configuration for QingCloud CSI](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration).
+
+### Add-on Config
+Save the above chart config locally (e.g. `/root/csi-qingcloud.yaml`). The add-on config for QingCloud CSI could be like:
+```yaml
addons:
- name: csi-qingcloud
namespace: kube-system
@@ -23,19 +77,24 @@ addons:
chart:
name: csi-qingcloud
repo: https://charts.kubesphere.io/test
- values:
- - config.qy_access_key_id=SHOULD_BE_REPLACED
- - config.qy_secret_access_key=SHOULD_BE_REPLACED
- - config.zone=SHOULD_BE_REPLACED
- - sc.isDefaultClass=true
+ values: /root/csi-qingcloud.yaml
```
-For more information about QingCloud, see [QingCloud](https://www.qingcloud.com/).
-For more chart values, see [configuration](https://github.com/kubesphere/helm-charts/tree/master/src/test/csi-qingcloud#configuration).
-## NFS-client
-The [nfs-client-provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your
-*already configured* NFS server, dynamically creating Persistent Volumes.
-Hear is a helm-chart example of installing by KubeKey add-on.
+## NFS Client
+With an NFS server, you can choose [NFS-client Provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) as the storage plugin. NFS-client Provisioner creates the PersistentVolume dynamically. The following is an example of KubeKey add-on configurations for NFS-client Provisioner installed by **Helm Charts including a StorageClass** .
+
+### Chart Config
+```yaml
+nfs:
+ server: "192.168.0.27" # <--ToBeReplaced->
+ path: "/mnt/csi/" # <--ToBeReplaced->
+storageClass:
+ defaultClass: false
+```
+If you want to configure more values, see [chart configuration for nfs-client](https://github.com/kubesphere/helm-charts/tree/master/src/main/nfs-client-provisioner#configuration).
+
+### Add-on Config
+Save the above chart config locally (e.g. `/root/nfs-client.yaml`). The add-on config for NFS-Client Provisioner could be like:
```yaml
addons:
- name: nfs-client
@@ -44,46 +103,39 @@ addons:
chart:
name: nfs-client-provisioner
repo: https://charts.kubesphere.io/main
- values:
- - nfs.server=SHOULD_BE_REPLACED
- - nfs.path=SHOULD_BE_REPLACED
- - storageClass.defaultClass=true
+ values: /root/nfs-client.yaml
```
-For more chart values, see [configuration](https://github.com/kubesphere/helm-charts/tree/master/src/main/csi-nfs-provisioner#configuration)
-## Ceph RBD
-Ceph RBD is an in-tree storage plugin on Kubernetes. As **hyperkube** images were [deprecated since 1.17](https://github.com/kubernetes/kubernetes/pull/85094),
-**KubeKey** will never use **hyperkube** images. So in-tree Ceph RBD may not work on Kubernetes installed by **KubeKey**.
-If you work with 14.0.0(Nautilus)+ Ceph Cluster, we appreciate you to use [Ceph CSI](#Ceph CSI).
-Meanwhile you could use [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) as substitute, which is same format with in-tree Ceph RBD.
-Here is an example of rbd-provisioner.
-```yaml
-- name: rbd-provisioner
- namespace: kube-system
- sources:
- chart:
- name: rbd-provisioner
- repo: https://charts.kubesphere.io/test
- values:
- - ceph.mon=SHOULD_BE_REPLACED # like 192.168.0.10:6789
- - ceph.adminKey=SHOULD_BE_REPLACED
- - ceph.userKey=SHOULD_BE_REPLACED
- - sc.isDefault=true
-```
-For more values, see [configuration](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner#configuration)
+## Ceph
+With a Ceph server, you can choose [Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd) or [Ceph CSI](https://github.com/ceph/ceph-csi) as the underlying storage plugin. Ceph RBD is an in-tree storage plugin on Kubernetes, and Ceph CSI is a Container Storage Interface (CSI) driver for RBD, CephFS.
+
+### Which Plugin to Select for Ceph
+Ceph CSI RBD is the preferred choice if you work with **14.0.0 (Nautilus)+** Ceph cluster. Here are some reasons:
+- The in-tree plugin will be deprecated in the future.
+- Ceph RBD only works on Kubernetes with **hyperkube** images, and **hyperkube** images were
+[deprecated since Kubernetes 1.17](https://github.com/kubernetes/kubernetes/pull/85094).
+- Ceph CSI has more features such as cloning, expanding and snapshots.
+
+### Ceph CSI RBD
+Ceph-CSI needs to be installed on v1.14.0+ Kubernetes, and work with 14.0.0 (Nautilus)+ Ceph Cluster.
+For details about compatibility, see [Ceph CSI Support Matrix](https://github.com/ceph/ceph-csi#support-matrix).
+
+The following is an example of KubeKey add-on configurations for Ceph CSI RBD installed by **Helm Charts**.
+As the StorageClass is not included in the chart, a StorageClass needs to be configured in the add-on config.
+
+#### Chart Config
-## Ceph CSI
-[Ceph-CSI](https://github.com/ceph/ceph-csi) contains Ceph Container Storage Interface (CSI) driver for RBD, CephFS. It will be substitute for [Ceph-RBD](#Ceph RBD) in the future.
-Ceph CSI should be installed on v1.14.0+ Kubernetes, and work with 14.0.0(Nautilus)+ Ceph Cluster.
-For details about compatibility, see [support matrix](https://github.com/ceph/ceph-csi#support-matrix). Here is an example of installing ceph-csi-rbd by **KubeKey** add-on.
```yaml
csiConfig:
- clusterID: "cluster1"
monitors:
- - SHOULD_BE_REPLACED
+ - "192.168.0.8:6789" # <--TobeReplaced-->
+ - "192.168.0.9:6789" # <--TobeReplaced-->
+ - "192.168.0.10:6789" # <--TobeReplaced-->
```
-Save the YAML file of ceph config in local, **/root/ceph-csi-config.yaml** for example.
+If you want to configure more values, see [chart configuration for ceph-csi-rbd](https://github.com/ceph/ceph-csi/tree/master/charts/ceph-csi-rbd).
+#### StorageClass (including secret)
```yaml
apiVersion: v1
kind: Secret
@@ -92,7 +144,7 @@ metadata:
namespace: kube-system
stringData:
userID: admin
- userKey: SHOULD_BE_REPLACED
+ userKey: "AQDoECFfYD3DGBAAm6CPhFS8TQ0Hn0aslTlovw==" # <--ToBeReplaced-->
encryptionPassphrase: test_passphrase
---
apiVersion: storage.k8s.io/v1
@@ -105,7 +157,7 @@ metadata:
provisioner: rbd.csi.ceph.com
parameters:
clusterID: "cluster1"
- pool: rbd
+ pool: "rbd" # <--ToBeReplaced-->
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: kube-system
@@ -119,8 +171,9 @@ allowVolumeExpansion: true
mountOptions:
- discard
```
-Save the YAML file of StorageClass in local, **/root/ceph-csi-rbd-sc.yaml** for example. The add-on configuration could be set like:
+#### Add-On Config
+Save the above chart config and StorageClass locally (e.g. `/root/ceph-csi-rbd.yaml` and `/root/ceph-csi-rbd-sc.yaml`). The add-on configuration can be set like:
```yaml
addons:
- name: ceph-csi-rbd
@@ -129,18 +182,46 @@ addons:
chart:
name: ceph-csi-rbd
repo: https://ceph.github.io/csi-charts
- values: /root/ceph-csi-config.yaml
+ values: /root/ceph-csi-rbd.yaml
- name: ceph-csi-rbd-sc
sources:
yaml:
path:
- /root/ceph-csi-rbd-sc.yaml
```
-For more information, see [chart for ceph-csi-rbd](https://github.com/ceph/ceph-csi/tree/master/charts/ceph-csi-rbd)
+### Ceph RBD
+KubeKey will never use **hyperkube** images. Hence, in-tree Ceph RBD may not work on Kubernetes installed by KubeKey. However, if your Ceph cluster is lower than 14.0.0 which means Ceph CSI can't be used, [rbd provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/ceph/rbd) can be used as a substitute for Ceph RBD. Its format is the same with [in-tree Ceph RBD](https://kubernetes.io/docs/concepts/storage/storage-classes/#ceph-rbd).
+The following is an example of KubeKey add-on configurations for rbd provisioner installed by **Helm Charts including a StorageClass**.
+
+#### Chart Config
+```yaml
+ceph:
+ mon: "192.168.0.12:6789" # <--ToBeReplaced-->
+ adminKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced-->
+ userKey: "QVFBS1JkdGRvV0lySUJBQW5LaVpSKzBRY2tjWmd6UzRJdndmQ2c9PQ==" # <--ToBeReplaced-->
+sc:
+ isDefault: false
+```
+If you want to configure more values, see [chart configuration for rbd-provisioner](https://github.com/kubesphere/helm-charts/tree/master/src/test/rbd-provisioner#configuration).
+
+#### Add-on Config
+Save the above chart config locally (e.g. `/root/rbd-provisioner.yaml`). The add-on config for rbd provisioner could be like:
+```yaml
+- name: rbd-provisioner
+ namespace: kube-system
+ sources:
+ chart:
+ name: rbd-provisioner
+ repo: https://charts.kubesphere.io/test
+ values: /root/rbd-provisioner.yaml
+```
## Glusterfs
-Glusterfs is an in-tree storage plugin on Kubernetes, only StorageClass is need to been installed.
+[Glusterfs](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs) is an in-tree storage plugin in Kubernetes. Hence, **only StorageClass** needs to be installed.
+The following is an example of KubeKey add-on configurations for glusterfs.
+
+### StorageClass (including secret)
```yaml
apiVersion: v1
kind: Secret
@@ -149,7 +230,7 @@ metadata:
namespace: kube-system
type: kubernetes.io/glusterfs
data:
- key: SHOULD_BE_REPLACED
+ key: "MTIzNDU2" # <--ToBeReplaced-->
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -159,24 +240,24 @@ metadata:
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]'
name: glusterfs
parameters:
- clusterid: SHOULD_BE_REPLACED
+ clusterid: "21240a91145aee4d801661689383dcd1" # <--ToBeReplaced-->
gidMax: "50000"
gidMin: "40000"
restauthenabled: "true"
- resturl: SHOULD_BE_REPLCADED # like "http://192.168.0.14:8080"
+ resturl: "http://192.168.0.14:8080" # <--ToBeReplaced-->
restuser: admin
secretName: heketi-secret
secretNamespace: kube-system
- volumetype: SHOULD_BE_REPLACED # like replicate:2
+ volumetype: "replicate:2" # <--ToBeReplaced-->
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
```
-For detailed information, see [configuration](https://kubernetes.io/docs/concepts/storage/storage-classes/#glusterfs)
-Save the YAML file of StorageClass in local, **/root/glusterfs-sc.yaml** for example. The add-on configuration could be set like:
-```bash
+### Add-on Config
+Save the above StorageClass yaml locally (e.g. **/root/glusterfs-sc.yaml**). The add-on configuration can be set like:
+```yaml
- addon
- name: glusterfs
sources:
@@ -186,10 +267,12 @@ Save the YAML file of StorageClass in local, **/root/glusterfs-sc.yaml** for exa
```
## OpenEBS/LocalVolumes
-[OpenEBS](https://github.com/openebs/openebs) Dynamic Local PV provisioner can create Kubernetes Local Persistent Volumes using a unique
-HostPath (directory) on the node to persist data. It's very convenient for experience KubeSphere when you has no special storage system.
-If no default StorageClass configured of **KubeKey** add-on, OpenEBS/LocalVolumes will be installed.
+[OpenEBS](https://github.com/openebs/openebs) Dynamic Local PV provisioner can create Kubernetes Local Persistent Volumes
+using a unique HostPath (directory) on the node to persist data.
+It is very convenient for users to get started with KubeSphere when they have no special storage system.
+If **no default StorageClass** is configured with **KubeKey** add-on, OpenEBS/LocalVolumes will be installed.
## Multi-Storage
-If you intend to install more than one storage plugins, remind to set only one to be default.
-Otherwise [ks-installer](https://github.com/kubesphere/ks-installer) will be confused about which StorageClass to use.
+If you intend to install more than one storage plugin, please only set one of them to be the default or
+set `spec.persistence.storageClass` of `ClusterConfiguration` with the StorageClass name you want KubeSphere to use.
+Otherwise, [ks-installer](https://github.com/kubesphere/ks-installer) will be confused about which StorageClass to use.
\ No newline at end of file
diff --git a/content/zh/docs/installing-on-linux/public-cloud/install-ks-on-huaweicloud-ecs.md b/content/zh/docs/installing-on-linux/public-cloud/install-ks-on-huaweicloud-ecs.md
index 991cc9ada..1b94b33c5 100644
--- a/content/zh/docs/installing-on-linux/public-cloud/install-ks-on-huaweicloud-ecs.md
+++ b/content/zh/docs/installing-on-linux/public-cloud/install-ks-on-huaweicloud-ecs.md
@@ -224,7 +224,7 @@ spec:
enabled: true
notification: # It supports notification management in multi-tenant Kubernetes clusters. It allows you to set AlertManager as its sender, and receivers include Email, Wechat Work, and Slack.
enabled: true
- openpitrix: # Whether to install KubeSphere Application Store. It provides an application store for Helm-based applications, and offer application lifecycle management
+ openpitrix: # Whether to install KubeSphere App Store. It provides an application store for Helm-based applications, and offer application lifecycle management
enabled: true
servicemesh: # Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offer visualization for traffic topology
enabled: true
diff --git a/content/zh/docs/multicluster-management/_index.md b/content/zh/docs/multicluster-management/_index.md
index 8dd3a3384..e34413344 100644
--- a/content/zh/docs/multicluster-management/_index.md
+++ b/content/zh/docs/multicluster-management/_index.md
@@ -1,62 +1,62 @@
---
-title: "Multi-cluster Management"
-description: "Import a hosted or on-premises Kubernetes cluster into KubeSphere"
+title: "多集群管理"
+description: "将托管的或本地的 Kubernetes 集群导入 KubeSphere"
layout: "single"
-linkTitle: "Multi-cluster Management"
+linkTitle: "多集群管理"
weight: 3000
icon: "/images/docs/docs.svg"
---
-## Introduction
+## 介绍
-This chapter demonstrates how to use the multi-cluster feature of KubeSphere to import heterogeneous clusters for unified management.
+本章演示如何使用 KubeSphere 的多集群功能导入异构集群以进行统一的管理。
-### [Overview](../multicluster-management/introduction/overview/)
+### [概要](../multicluster-management/introduction/overview/)
-Gain a basic understanding of multi-cluster management, such as its common use cases, and the benefits that KubeSphere can bring with its multi-cluster feature.
+对多集群管理有个基本的了解,例如多集群管理的常见用例,以及 KubeSphere 可以通过多集群功能带来的好处。
-### [Kubernetes Federation in KubeSphere](../multicluster-management/introduction/kubefed-in-kubesphere/)
+### [KubeSphere 中的 Kubernetes 联邦](../multicluster-management/introduction/kubefed-in-kubesphere/)
-Understand the fundamental concept of Kubernetes federation in KubeSphere, including M clusters and H clusters.
+了解 KubeSphere 中的 Kubernetes 联邦的基本概念,包括 M 集群和 H 集群。
-## Enable Multi-cluster in KubeSphere
+## 在 KubeSphere 中启用多集群
-### [Direct Connection](../multicluster-management/enable-multicluster/direct-connection/)
+### [直接连接](../multicluster-management/enable-multicluster/direct-connection/)
-Understand the general steps of importing clusters through direct connection.
+了解通过直接连接导入集群的一般步骤。
-### [Agent Connection](../multicluster-management/enable-multicluster/agent-connection/)
+### [代理连接](../multicluster-management/enable-multicluster/agent-connection/)
-Understand the general steps of importing clusters through agent connection.
+了解通过代理连接导入集群的一般步骤。
-### [Retrieve KubeConfig](../multicluster-management/enable-multicluster/retrieve-kubeconfig/)
+### [获取 KubeConfig](../multicluster-management/enable-multicluster/retrieve-kubeconfig/)
-Retrieve the KubeConfig which is needed for cluster importing through direct connection.
+获取通过直接连接导入集群所需的 KubeConfig。
-## Import Cloud-hosted Kubernetes Cluster
+## 导入云托管的 Kubernetes 集群
-### [Import Aliyun ACK Cluster](../multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack/)
+### [导入阿里云 ACK 集群](../multicluster-management/import-cloud-hosted-k8s/import-aliyun-ack/)
-Learn how to import an Alibaba Cloud Kubernetes cluster.
+了解如何导入阿里云 Kubernetes 集群。
-### [Import AWS EKS Cluster](../multicluster-management/import-cloud-hosted-k8s/import-aws-eks/)
+### [导入 AWS EKS 集群](../multicluster-management/import-cloud-hosted-k8s/import-aws-eks/)
-Learn how to import an Amazon Elastic Kubernetes Service cluster.
+了解如何导入 Amazon Elastic Kubernetes 服务集群。
-## Import On-prem Kubernetes Cluster
+## 导入本地 Kubernetes 集群
-### [Import Kubeadm Kubernetes Cluster](../multicluster-management/import-on-prem-k8s/import-kubeadm-k8s/)
+### [导入 Kubeadm Kubernetes 集群](../multicluster-management/import-on-prem-k8s/import-kubeadm-k8s/)
-Learn how to import a Kubernetes cluster created with kubeadm.
+了解如何导入通过 kubeadm 创建的 Kubernetes 集群。
-## Remove Cluster
+## 删除集群
-### [Remove a Cluster from KubeSphere](../multicluster-management/remove-cluster/kubefed-in-kubesphere/)
+### [从 KubeSphere 删除集群](../multicluster-management/remove-cluster/kubefed-in-kubesphere/)
-Learn how to unbind a cluster from your cluster pool in KubeSphere.
+了解如何从 KubeSphere 的集群池中解除集群的绑定。
diff --git a/content/zh/docs/multicluster-management/introduction/_index.md b/content/zh/docs/multicluster-management/introduction/_index.md
index 44efc6f9c..9668e8ac4 100644
--- a/content/zh/docs/multicluster-management/introduction/_index.md
+++ b/content/zh/docs/multicluster-management/introduction/_index.md
@@ -1,5 +1,5 @@
---
-linkTitle: "Introduction"
+linkTitle: "介绍"
weight: 3005
_build:
diff --git a/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md b/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md
index f0ea3f98d..f4cf648af 100644
--- a/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md
+++ b/content/zh/docs/multicluster-management/introduction/kubefed-in-kubesphere.md
@@ -1,13 +1,13 @@
---
-title: "Kubernetes Federation in KubeSphere"
-keywords: 'Kubernetes, KubeSphere, federation, multicluster, hybrid-cloud'
-description: 'Overview'
+title: "KubeSphere 中的 Kubernetes 联邦"
+keywords: 'Kubernetes, KubeSphere, 联邦, 多集群, 混合云'
+description: '概要'
weight: 3007
---
-The multi-cluster feature relates to the network connection among multiple clusters. Therefore, it is important to understand the topological relations of clusters as the workload can be reduced.
+多集群功能与多个集群之间的网络连接有关。因此,了解集群的拓扑关系很重要,这样可以减少工作量。
-Before you use the multi-cluster feature, you need to create a Host Cluster (hereafter referred to as **H** Cluster), which is actually a KubeSphere cluster with the multi-cluster feature enabled. All the clusters managed by the H Cluster are called Member Cluster (hereafter referred to as **M** Cluster). They are common KubeSphere clusters that do not have the multi-cluster feature enabled. There can only be one H Cluster while multiple M Clusters can exist at the same time. In a multi-cluster architecture, the network between the H Cluster and the M Cluster can be connected directly or through an agent. The network between M Clusters can be set in a completely isolated environment.
+在使用多集群功能之前,您需要创建一个主集群(Host Cluster,以下简称 **H** 集群),H 集群实际上是启用了多集群功能的 KubeSphere 集群。所有被 H 集群管理的集群称为成员集群(Member Cluster,以下简称 **M** 集群)。M 集群是未启用多集群功能的普通 KubeSphere 集群。只能有一个 H 集群存在,而多个 M 集群可以同时存在。 在多集群体系结构中,H 集群和 M 集群之间的网络可以直接连接,也可以通过代理连接。 M 集群之间的网络可以设置在完全隔离的环境中。
-
+
diff --git a/content/zh/docs/multicluster-management/introduction/overview.md b/content/zh/docs/multicluster-management/introduction/overview.md
index 77e8530f9..3a0ed5f1e 100644
--- a/content/zh/docs/multicluster-management/introduction/overview.md
+++ b/content/zh/docs/multicluster-management/introduction/overview.md
@@ -1,15 +1,15 @@
---
-title: "Overview"
-keywords: 'Kubernetes, KubeSphere, multicluster, hybrid-cloud'
-description: 'Overview'
+title: "概要"
+keywords: 'Kubernetes, KubeSphere, 多集群, 混合云'
+description: '概要'
weight: 3006
---
-Today, it's very common for organizations to run and manage multiple Kubernetes clusters across different cloud providers or infrastructures. As each Kubernetes cluster is a relatively self-contained unit, the upstream community is struggling to research and develop a multi-cluster management solution. That said, Kubernetes Cluster Federation ([KubeFed](https://github.com/kubernetes-sigs/kubefed) for short) may be a possible approach among others.
+如今,在不同的云服务提供商或者基础设施上运行和管理多个 Kubernetes 集群已经非常普遍。 由于每个 Kubernetes 集群都是一个相对独立的单元,上游社区正努力研发多集群管理解决方案。 也就是说,Kubernetes 集群联邦(Kubernetes Cluster Federation,简称 [KubeFed](https://github.com/kubernetes-sigs/kubefed))可能是其中一种可行的方法。
-The most common use cases of multi-cluster management include service traffic load balancing, development and production isolation, decoupling of data processing and data storage, cross-cloud backup and disaster recovery, flexible allocation of computing resources, low latency access with cross-region services, and vendor lock-in avoidance.
+多集群管理最常见的用例包括服务流量负载均衡、开发和生产的隔离、数据处理和数据存储的分离、跨云备份和灾难恢复、计算资源的灵活分配、跨区域服务的低延迟访问以及厂商捆绑的防范。
-KubeSphere is developed to address multi-cluster and multi-cloud management challenges and implement the proceeding user scenarios, providing users with a unified control plane to distribute applications and its replicas to multiple clusters from public cloud to on-premises environments. KubeSphere also provides rich observability cross multiple clusters including centralized monitoring, logging, events, and auditing logs.
+KubeSphere 的开发旨在解决多集群和多云管理的难题,并实现后续的用户场景,为用户提供统一的控制平面,以将应用程序及其副本分发到从公有云到本地环境的多个集群。 KubeSphere 还提供跨多个群集的丰富的可观察性,包括集中式监视、日志记录、事件和审核日志。
-
+
diff --git a/content/zh/docs/pluggable-components/_index.md b/content/zh/docs/pluggable-components/_index.md
index 46476c850..d6aea85df 100644
--- a/content/zh/docs/pluggable-components/_index.md
+++ b/content/zh/docs/pluggable-components/_index.md
@@ -1,49 +1,49 @@
---
-title: "Enable Pluggable Components"
-description: "Enable KubeSphere Pluggable Components"
+title: "启用可插拔组件"
+description: "启用 KubeSphere 的可插拔组件"
layout: "single"
-linkTitle: "Enable Pluggable Components"
+linkTitle: "启用可插拔组件"
weight: 3500
icon: "/images/docs/docs.svg"
---
-This chapter demonstrates detailed steps of enabling different components in KubeSphere both before and after installation so that you can take full advantage of the container platform for your business.
+本章详细演示了 KubeSphere 中不同组件在安装前和安装后的启用步骤,以便您可以充分利用容器平台为您的业务服务。
-## [Overview](../pluggable-components/overview/)
+## [概览](../pluggable-components/overview/)
-Develop a basic understanding of key components in KubeSphere, including features and resource consumption.
+培养对 KubeSphere 中关键组件的基本理解,包括功能和资源消耗。
-## [KubeSphere App Store](../pluggable-components/app-store/)
+## [KubeSphere 应用商店](../pluggable-components/app-store/)
-Learn how to enable App Store to share data and apps internally and set industry standards of delivery process externally.
+学习如何在内部实现 KubeSphere 应用商店的数据和应用共享,并在外部制定交付流程的行业标准。
-## [KubeSphere DevOps System](../pluggable-components/devops/)
+## [KubeSphere DevOps 系统](../pluggable-components/devops/)
-Learn how to enable DevOps to further free your developers and let them focus on code writing.
+了解如何启用 DevOps 系统来进一步解放你的开发人员,让他们专注于代码编写。
-## [KubeSphere Auditing Logs](../pluggable-components/auditing-logs/)
+## [KubeSphere 审计日志](../pluggable-components/auditing-logs/)
-Learn how to enable Auditing to document platform events and activities.
+了解如何启用审计来记录平台事件和活动。
-## [KubeSphere Events](../pluggable-components/events/)
+## [KubeSphere 事件](../pluggable-components/events/)
-Learn how to enable Events to keep track of everything that is happening on the platform.
+了解如何启用 KubeSphere 事件模块来跟踪平台上发生的一切。
-## [KubeSphere Logging System](../pluggable-components/logging/)
+## [KubeSphere 日志系统](../pluggable-components/logging/)
-Learn how to enable Logging to leverage the tenant-based system for log collection, query and management.
+了解如何启用日志,利用基于租户的系统进行日志收集、查询和管理。
-## [KubeSphere Service Mesh](../pluggable-components/service-mesh/)
+## [KubeSphere 服务网格](../pluggable-components/service-mesh/)
-Learn how to enable Service Mesh to use different traffic management strategies for microservices governance.
+了解如何启用服务网格,使用不同的流量管理策略进行微服务治理。
-## [KubeSphere Alerting and Notification](../pluggable-components/alerting-notification/)
+## [KubeSphere 告警和通知](../pluggable-components/alerting-notification/)
-Learn how to enable Alerting and Notification to identify any potential issues in advance before they take a toll on your business.
+了解如何启用告警和通知功能,以便在潜在问题对您的业务造成影响之前提前识别这些问题。
-## [Network Policy](../pluggable-components/network-policy/)
+## [网络策略](../pluggable-components/network-policy/)
-Learn how to enable Network Policy to control traffic flow at the IP address or port level.
\ No newline at end of file
+了解如何启用网络策略来控制 IP 地址或端口级别的流量。
diff --git a/content/zh/docs/pluggable-components/alerting-notification.md b/content/zh/docs/pluggable-components/alerting-notification.md
index 8db1091fe..5b322612a 100644
--- a/content/zh/docs/pluggable-components/alerting-notification.md
+++ b/content/zh/docs/pluggable-components/alerting-notification.md
@@ -1,31 +1,31 @@
---
-title: "KubeSphere Alerting and Notification"
+title: "KubeSphere 告警和通知系统"
keywords: "Kubernetes, alertmanager, KubeSphere, alerting, notification"
-description: "How to Enable Alerting and Notification"
+description: "如何启用告警和通知系统"
-linkTitle: "KubeSphere Alerting and Notification"
+linkTitle: "KubeSphere 告警和通知"
weight: 3545
---
-## What are KubeSphere Alerting and Notification
+## 什么是 KubeSphere 告警和通知系统
-Alerting and Notification are two important building blocks of observability, closely related monitoring and logging. The alerting system in KubeSphere, coupled with the proactive failure notification system, allows users to know activities of interest based on alert policies. When a predefined threshold of a certain metric is reached, an alert will be sent to preconfigured recipients, the notification method of which can be set by yourself, including Email, WeChat Work and Slack. With a highly functional alerting and notification system in place, you can quickly identify and resolve potential issues in advance before they affect your business.
+告警和通知是可观察性的两个重要构件,与监控和日志密切相关。KubeSphere 中的告警系统与主动故障通知系统相结合,用户可以根据告警策略了解感兴趣的活动。当达到某个指标的预定义阈值时,会向预先配置的收件人发出警报,通知方式可以自行设置,包括 Email、企业微信和 Slack。有了功能强大的告警和通知系统,您就可以在潜在的问题影响到您的业务之前,迅速发现并提前解决。
-For more information, see Alerting Policy and Message.
+更多信息,请参见告警策略和消息。
{{< notice note >}}
-It is recommended that you enable Alerting and Notification together so that users can receive notifications of alerts in time.
+建议同时启用告警和通知功能,这样用户可以及时收到告警通知。
{{ notice >}}
-## Enable Alerting and Notification before Installation
+## 在安装前启用告警和通知系统
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -33,11 +33,11 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Alerting and Notification in this mode (e.g. for testing purpose), refer to the following section to see how Alerting and Notification can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用告警和通知(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用告警和通知系统。
{{ notice >}}
-2. In this file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `alerting` 和 `notification`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
alerting:
@@ -46,24 +46,25 @@ notification:
enabled: true # Change "false" to "true"
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Alerting and Notification, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装告警和通知系统,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Alerting and Notification, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用告警和通知系统,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `alerting` and `notification` and enable them by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `alerting` 和 `notification`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
alerting:
@@ -72,31 +73,31 @@ notification:
enabled: true # Change "false" to "true"
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Alerting and Notification after Installation
+## 在安装后启用告警和通知系统
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `alerting` and `notification` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `alerting` 和 `notification`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
alerting:
@@ -105,7 +106,7 @@ notification:
enabled: true # Change "false" to "true"
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -113,31 +114,31 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-If you can see **Alerting Messages** and **Alerting Policies** in the image below, it means the installation succeeds as the two parts won't display until you install the component.
+如果在下图中可以看到**告警消息**和**告警策略**,说明安装成功,因为在安装组件之前,这两部分不会显示。
-
+
{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n kubesphere-alerting-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
@@ -154,4 +155,4 @@ notification-deployment-748897cbdf-2djpr 1/1 Running 0 36m
{{ tab >}}
-{{ tabs >}}
\ No newline at end of file
+{{ tabs >}}
diff --git a/content/zh/docs/pluggable-components/app-store.md b/content/zh/docs/pluggable-components/app-store.md
index 0c5102ed5..ae03130fa 100644
--- a/content/zh/docs/pluggable-components/app-store.md
+++ b/content/zh/docs/pluggable-components/app-store.md
@@ -1,29 +1,29 @@
---
-title: "KubeSphere App Store"
+title: "KubeSphere 应用商店"
keywords: "Kubernetes, KubeSphere, app-store, OpenPitrix"
-description: "How to Enable KubeSphere App Store"
+description: "如何启用 KubeSphere 应用商店"
-linkTitle: "KubeSphere App Store"
+linkTitle: "KubeSphere 应用商店"
weight: 3515
---
-## What is KubeSphere App Store
+## 什么是 KubeSphere 应用商店
-As an open-source and app-centric container platform, KubeSphere provides users with a Helm-based app store for application lifecycle management on the back of [OpenPitrix](https://github.com/openpitrix/openpitrix), an open-source web-based system to package, deploy and manage different types of apps. KubeSphere App Store allows ISVs, developers and users to upload, test, deploy and release apps with just several clicks in a one-stop shop.
+作为一个开源的、以应用为中心的容器平台,KubeSphere 在 [OpenPitrix](https://github.com/openpitrix/openpitrix) 的基础上,为用户提供了一个基于 Helm 的应用商店,用于应用生命周期管理,这是一个开源的基于网络的系统,用于打包、部署和管理不同类型的应用。KubeSphere 应用商店允许 ISV、开发者和用户在一站式服务中只需点击几下就可以上传、测试、部署和发布应用。
-Internally, KubeSphere App Store can serve as a place for different teams to share data, middleware, and office applications. Externally, it is conducive to setting industry standards of building and delivery. By default, there are 15 apps in the App Store. After you enable this feature, you can add more apps with app templates.
+对内,KubeSphere 应用商店可以作为不同团队共享数据、中间件和办公应用的场所。对外,有利于制定行业标准的建设和交付。默认情况下,应用商店中有 15 个应用。启用该功能后,可以通过应用模板添加更多应用。
-
+
-For more information, see App Store.
+有关更多信息,请参阅应用商店。
-## Enable App Store before Installation
+## 在安装前启用应用商店
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -31,73 +31,74 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable App Store in this mode (e.g. for testing purpose), refer to the following section to see how App Store can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用应用商店(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用应用商店。
{{ notice >}}
-2. In this file, navigate to `openpitrix` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `openpitrix`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
openpitrix:
enabled: true # Change "false" to "true"
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install App Store, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装应用商店,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable App Store, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用应用商店,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `openpitrix` and enable App Store by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `openpitrix`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
openpitrix:
enabled: true # Change "false" to "true"
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable App Store after Installation
+## 在安装后启用应用商店
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `openpitrix` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `openpitrix`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
openpitrix:
enabled: true # Change "false" to "true"
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -105,31 +106,31 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-Go to **Components** and check the status of OpenPitrix. You may see an image as follows:
+进入**服务组件**,检查 **OpenPitrix** 的状态。您可能会看到如下图片:

{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n openpitrix-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/pluggable-components/auditing-logs.md b/content/zh/docs/pluggable-components/auditing-logs.md
index 9d3fef9cb..10925a0c4 100644
--- a/content/zh/docs/pluggable-components/auditing-logs.md
+++ b/content/zh/docs/pluggable-components/auditing-logs.md
@@ -1,25 +1,25 @@
---
-title: "KubeSphere Auditing Logs"
+title: "KubeSphere 审计日志"
keywords: "Kubernetes, auditing, KubeSphere, logs"
-description: "How to enable KubeSphere Auditing Logs"
+description: "如何启用 KubeSphere 审计日志"
-linkTitle: "KubeSphere Auditing Logs"
+linkTitle: "KubeSphere 审计日志"
weight: 3525
---
-## What are KubeSphere Auditing Logs?
+## 什么是 KubeSphere 审计日志
-KubeSphere Auditing Log System provides a security-relevant chronological set of records documenting the sequence of activities related to individual users, managers, or other components of the system. Each request to KubeSphere generates an event that is then written to a webhook and processed according to a certain rule.
+KubeSphere 审计日志系统提供了一套与安全相关的按时间顺序排列的记录,记录了与单个用户、管理人员或系统其他组件相关的活动顺序。对 KubeSphere 的每个请求都会产生一个事件,然后写入 Webhook,并根据一定的规则进行处理。
-For more information, see Logging, Events, and Auditing.
+有关更多信息,请参阅日志、事件和审计。
-## Enable Auditing Logs before Installation
+## 在安装前启用审计日志
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -27,11 +27,11 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Auditing in this mode (e.g. for testing purpose), refer to the following section to see how Auditing can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用审计日志(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用审计模式。
{{ notice >}}
-2. In this file, navigate to `auditing` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `auditing`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
auditing:
@@ -40,7 +40,7 @@ auditing:
{{< notice note >}}
-By default, KubeKey will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了审计功能,KubeKey 将在内部安装 Elasticsearch。对于生产环境,如果你想启用审计,强烈建议你在 **config-sample.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你在安装前提供以下信息,KubeKey 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -56,24 +56,25 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Auditing, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装审计日志,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Auditing, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用审计日志,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `auditing` and enable Auditing by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `auditing`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
auditing:
@@ -82,7 +83,7 @@ auditing:
{{< notice note >}}
-By default, ks-installer will install Elasticsearch internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了审计功能,ks-installer 会在内部安装 Elasticsearch。对于生产环境,如果你想启用审计,强烈建议你在 **cluster-configuration.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。当你在安装前提供以下信息时,ks-installer 将直接整合你的外部 Elasticsearch,而不是安装内部 Elasticsearch。
{{ notice >}}
@@ -98,31 +99,31 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Auditing Logs after Installation
+## 在安装后启用审计日志
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `auditing` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `auditing`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
auditing:
@@ -131,7 +132,7 @@ auditing:
{{< notice note >}}
-By default, Elasticsearch will be installed internally if Auditing is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Auditing, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了审计功能,Elasticsearch 将在内部安装。对于生产环境,如果你想启用审计,强烈建议你在这个 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你提供了以下信息,KubeSphere 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -147,7 +148,7 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -155,33 +156,33 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-If you enable both Logging and Auditing, you can check the status of Auditing in **Logging** in **Components**. You may see an image as follows:
+如果您同时启用了日志记录和审计日志,您可以在**服务组件**的 **Logging** 中查看审计状态。您可能会看到如下图片:

-If you only enable Auditing without Logging installed, you cannot see the image above as the button **Logging** will not display.
+如果只启用审计而不安装日志,则无法看到上面的图片,因为 **Logging** 按钮不会显示。
{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n kubesphere-logging-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/pluggable-components/devops.md b/content/zh/docs/pluggable-components/devops.md
index 753e4c14c..6ee22e74e 100644
--- a/content/zh/docs/pluggable-components/devops.md
+++ b/content/zh/docs/pluggable-components/devops.md
@@ -1,27 +1,27 @@
---
-title: "KubeSphere DevOps System"
+title: "KubeSphere DevOps 系统"
keywords: "Kubernetes, Jenkins, KubeSphere, DevOps, cicd"
-description: "How to Enable KubeSphere DevOps System"
+description: "如何启用 KubeSphere DevOps 系统"
-linkTitle: "KubeSphere DevOps System"
+linkTitle: "KubeSphere DevOps"
weight: 3520
---
-## What is KubeSphere DevOps System
+## 什么是 KubeSphere DevOps 系统
-KubeSphere DevOps System is designed for CI/CD workflows in Kubernetes. Based on [Jenkins](https://jenkins.io/), it provides one-stop solutions to help both development and Ops teams build, test and publish apps to Kubernetes in a straight-forward way. It also features plugin management, Binary-to-Image (B2I), Source-to-Image (S2I), code dependency caching, code quality analysis, pipeline logging, etc.
+KubeSphere DevOps 系统是专为 Kubernetes 中的 CI/CD 工作流设计的。基于 [Jenkins](https://jenkins.io/),它提供了一站式的解决方案,帮助开发和运维团队以直接的方式构建、测试和发布应用到 Kubernetes。它还具有插件管理、二进制转镜像(B2I)、源码转镜像(S2I)、代码依赖缓存、代码质量分析、流水线日志等功能。
-The DevOps system offers an enabling environment for users as apps can be automatically released to the same platform. It is also compatible with third-party private image registries (e.g. Harbor) and code repositories (e.g. GitLab/GitHub/SVN/BitBucket). As such, it creates excellent user experiences by providing users with comprehensive, visualized CI/CD pipelines which are extremely useful in air-gapped environments.
+DevOps 系统为用户提供了一个有利的环境,因为应用可以自动发布到同一个平台。它还兼容第三方私有镜像仓库(如 Harbor)和代码库(如 GitLab/GitHub/SVN/BitBucket)。因此,它通过为用户提供全面的、可视化的 CI/CD 流水线来创造优秀的用户体验,这些流水线在离线(air-gapped)环境中非常有用。
-For more information, see DevOps Administration.
+有关更多信息,请参阅 DevOps 管理。
-## Enable DevOps before Installation
+## 在安装前启用 DevOps
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -29,73 +29,74 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable DevOps in this mode (e.g. for testing purpose), refer to the following section to see how DevOps can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用 DevOps(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用 DevOps 系统。
{{ notice >}}
-2. In this file, navigate to `devops` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `devops`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
devops:
enabled: true # Change "false" to "true"
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install DevOps, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装 DevOps 系统,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable DevOps, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用 DevOps,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `devops` and enable DevOps by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `devops`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
devops:
enabled: true # Change "false" to "true"
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable DevOps after Installation
+## 在安装后启用 DevOps 系统
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `devops` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `devops`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
devops:
enabled: true # Change "false" to "true"
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -103,31 +104,31 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-Go to **Components** and check the status of DevOps. You may see an image as follows:
+进入**服务组件**,检查 DevOps 的状态。你可能会看到如下图片:

{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n kubesphere-devops-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/pluggable-components/events.md b/content/zh/docs/pluggable-components/events.md
index d3b5626d7..ac87e51fc 100644
--- a/content/zh/docs/pluggable-components/events.md
+++ b/content/zh/docs/pluggable-components/events.md
@@ -1,25 +1,25 @@
---
-title: "KubeSphere Events"
+title: "KubeSphere 事件系统"
keywords: "Kubernetes, events, KubeSphere, k8s-events"
-description: "How to enable KubeSphere Events"
+description: "如何启用 KubeSphere 事件"
-linkTitle: "KubeSphere Events"
+linkTitle: "KubeSphere 事件系统"
weight: 3530
---
-## What are KubeSphere Events
+## 什么是 KubeSphere 事件系统
-KubeSphere events allow users to keep track of what is happening inside a cluster, such as node scheduling status and image pulling result. They will be accurately recorded with the specific reason, status and message displayed in the web console. To query events, users can quickly launch the web Toolkit and enter related information in the search bar with different filters (e.g keyword and project) available. Events can also be archived to third-party tools, such as Elasticsearch, Kafka or Fluentd.
+KubeSphere 事件系统允许用户跟踪集群内部发生的事情,如节点调度状态和镜像拉取结果。它们将被准确地记录下来,并在 Web 控制台中显示具体的原因、状态和信息。要查询事件,用户可以快速启动 Web 工具箱,在搜索栏中输入相关信息,并有不同的过滤器(如关键字和项目)可供选择。事件也可以归档到第三方工具,如 Elasticsearch、Kafka 或 Fluentd。
-For more information, see Logging, Events and Auditing.
+有关更多信息,请参见日志记录、事件和审计系统。
-## Enable Events before Installation
+## 在安装前启用事件系统
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -27,11 +27,11 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Events in this mode (e.g. for testing purpose), refer to the following section to see how Events can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用事件(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用事件系统。
{{ notice >}}
-2. In this file, navigate to `events` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `events`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
events:
@@ -40,7 +40,7 @@ events:
{{< notice note >}}
-By default, KubeKey will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了事件功能,KubeKey 将在内部安装 Elasticsearch。对于生产环境,如果你想启用事件,强烈建议你在 **config-sample.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你在安装前提供以下信息,KubeKey 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -56,24 +56,25 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Events, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装事件系统,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Events, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用事件,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `events` and enable Events by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `events`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
events:
@@ -82,7 +83,7 @@ events:
{{< notice note >}}
-By default, ks-installer will install Elasticsearch internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了事件功能,ks-installer 会在内部安装 Elasticsearch。对于生产环境,如果你想启用事件,强烈建议你在 **cluster-configuration.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。当你在安装前提供以下信息时,ks-installer 将直接整合你的外部 Elasticsearch,而不是安装内部 Elasticsearch。
{{ notice >}}
@@ -98,31 +99,31 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Events after Installation
+## 在安装后启用事件
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `events` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `events`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
events:
@@ -131,7 +132,7 @@ events:
{{< notice note >}}
-By default, Elasticsearch will be installed internally if Events is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Events, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了事件,Elasticsearch 将在内部安装。对于生产环境,如果你想启用事件,强烈建议你在这个 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你提供了以下信息,KubeSphere 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -147,7 +148,7 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -155,33 +156,33 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-If you enable both Logging and Events, you can check the status of Events in **Logging** in **Components**. You may see an image as follows:
+如果您同时启用了日志和事件,您可以在**服务组件**的 **Logging** 中查看事件服务状态。您可能会看到如下图片:

-If you only enable Events without Logging installed, you cannot see the image above as the button **Logging** will not display.
+如果只启用事件而不安装日志,则无法看到上面的图片,因为 **Logging** 按钮不会显示。
{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n kubesphere-logging-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/pluggable-components/faq/_index.md b/content/zh/docs/pluggable-components/faq/_index.md
new file mode 100644
index 000000000..c3f3d5bdd
--- /dev/null
+++ b/content/zh/docs/pluggable-components/faq/_index.md
@@ -0,0 +1,7 @@
+---
+linkTitle: "FAQ"
+weight: 3550
+
+_build:
+ render: false
+---
diff --git a/content/zh/docs/pluggable-components/faq/logging.md b/content/zh/docs/pluggable-components/faq/logging.md
new file mode 100644
index 000000000..51e6ae60b
--- /dev/null
+++ b/content/zh/docs/pluggable-components/faq/logging.md
@@ -0,0 +1,144 @@
+---
+title: "日志系统"
+keywords: "Kubernetes, Elasticsearch, KubeSphere, Logging, logs"
+description: "FAQ"
+
+linkTitle: "日志系统"
+weight: 3535
+---
+
+## 如何将日志存储改为外部 Elasticsearch 并关闭内部 Elasticsearch
+
+如果您使用的是 KubeSphere 内部的 Elasticsearch,并且想把它改成您的外部 Elasticsearch,请按照下面的指南操作。否则,如果您还没有启用日志系统,请参阅[启用日志系统](../../logging/)直接设置外部 Elasticsearch。
+
+首先,更新 KubeKey 配置。
+
+```shell
+kubectl edit cc -n kubesphere-system ks-installer
+```
+
+- 将如下 `es.elasticsearchDataXXX`、`es.elasticsearchMasterXXX` 和 `status.logging` 的注释取消。
+
+- 将 `es.externalElasticsearchUrl` 设置为 Elasticsearch 的地址,`es.externalElasticsearchPort` 设置为它的端口号。
+
+```shell
+apiVersion: installer.kubesphere.io/v1alpha1
+kind: ClusterConfiguration
+metadata:
+ name: ks-installer
+ namespace: kubesphere-system
+ ...
+spec:
+ ...
+ common:
+ es:
+ # elasticsearchDataReplicas: 1
+ # elasticsearchDataVolumeSize: 20Gi
+ # elasticsearchMasterReplicas: 1
+ # elasticsearchMasterVolumeSize: 4Gi
+ elkPrefix: logstash
+ logMaxAge: 7
+ externalElasticsearchUrl: <192.168.0.2>
+ externalElasticsearchPort: <9200>
+ ...
+status:
+ ...
+ # logging:
+ # enabledTime: 2020-08-10T02:05:13UTC
+ # status: enabled
+ ...
+```
+
+然后,重新运行 ks-installer。
+
+```shell
+kubectl rollout restart deploy -n kubesphere-system ks-installer
+```
+
+最后,要删除内部的 Elasticsearch,请运行以下命令。请确认你已经备份了内部 Elasticsearch 中的数据。
+
+```shell
+helm uninstall -n kubesphere-logging-system elasticsearch-logging
+```
+
+## 如何在启用 X-Pack Security 的情况下将日志存储改为 Elasticsearch
+
+目前,KubeSphere 不支持与启用 X-Pack Security 的 Elasticsearch 集成。此功能即将推出。
+
+## 如何修改日志数据保留天数
+
+你需要更新 KubeKey 配置并重新运行 ks-installer。
+
+```shell
+kubectl edit cc -n kubesphere-system ks-installer
+```
+
+- 将如下 `status.logging` 的注释取消。
+
+- 将 `es.logMaxAge` 设置为所需天数(默认为 7 天)。
+
+```shell
+apiVersion: installer.kubesphere.io/v1alpha1
+kind: ClusterConfiguration
+metadata:
+ name: ks-installer
+ namespace: kubesphere-system
+ ...
+spec:
+ ...
+ common:
+ es:
+ ...
+ logMaxAge: <7>
+ ...
+status:
+ ...
+ # logging:
+ # enabledTime: 2020-08-10T02:05:13UTC
+ # status: enabled
+ ...
+```
+
+- 重新运行 ks-installer。
+
+```shell
+kubectl rollout restart deploy -n kubesphere-system ks-installer
+```
+
+## 无法从工具箱中的某些节点上的工作负载中找出日志
+
+如果你采用[多节点安装](../../installing-on-linux/introduction/multioverview/),并且使用符号链接作为 Docker 根目录,请确保所有节点遵循完全相同的符号链接。日志代理在 DaemonSet 中部署到节点上。容器日志路径的任何差异都可能导致该节点上的收集失败。
+
+要找出节点上的 Docker 根目录路径,可以运行以下命令。确保所有节点都适用相同的值。
+
+```
+docker info -f '{{.DockerRootDir}}'
+```
+
+## 工具箱中的日志查看页面在加载中卡住
+
+如果您发现日志搜索在加载中卡住,请检查您使用的存储系统。例如,配置不当的 NFS 存储系统可能会导致此问题。
+
+## 工具箱显示今天没有日志记录
+
+请检查您的日志容量是否超过了 Elasticsearch 的存储容量限制。如果是,请增加 Elasticsearch 的磁盘容量。
+
+## 在工具箱中查看日志时,报告服务器内部错误
+
+如果您在工具箱中观察到内部服务器错误,可能有几个原因导致此问题。
+
+- 网络分区
+- 无效的 Elasticsearch 主机和端口
+- Elasticsearch 健康状态为红色
+
+## 如何让 KubeSphere 只收集指定工作负载的日志
+
+KubeSphere 的日志代理是由 Fluent Bit 提供的,您需要更新 Fluent Bit 配置来排除某些工作负载的日志。要修改 Fluent Bit 输入配置,请运行以下命令:
+
+```shell
+kubectl edit input -n kubesphere-logging-system tail
+```
+
+更新 `Input.Spec.Tail.ExcludePath` 字段。例如,将路径设置为 `/var/log/containers/*_kube*-system_*.log`,以排除系统组件的任何日志。
+
+更多信息请阅读项目 [Fluent Bit Operator](https://github.com/kubesphere/fluentbit-operator)。
diff --git a/content/zh/docs/pluggable-components/faq/monitoring.md b/content/zh/docs/pluggable-components/faq/monitoring.md
new file mode 100644
index 000000000..72405e686
--- /dev/null
+++ b/content/zh/docs/pluggable-components/faq/monitoring.md
@@ -0,0 +1,107 @@
+---
+title: "监控系统"
+keywords: "Kubernetes, Prometheus, KubeSphere, Monitoring"
+description: "FAQ"
+
+linkTitle: "监控系统"
+weight: 3540
+---
+
+## 如何访问 KubeSphere Prometheus 控制台
+
+KubeSphere 监控引擎由 Prometheus 提供支持。出于调试目的,您可能希望通过 NodePort 访问内置的 Prometheus 服务。要做到这一点,运行以下命令来编辑服务类型。
+
+```shell
+kubectl edit svc -n kubesphere-monitoring-system prometheus-k8s
+```
+
+## Node Exporter 引起的主机端口 9100 冲突
+
+如果有进程占用主机端口 9100,`kubesphere-monitoring-system` 下的 Node Exporter 会崩溃。为了解决冲突,你需要终止进程或将 Node Exporter 换到另一个可用端口。
+
+要采用另一个主机端口,例如 `29100`,运行以下命令,将所有 `9100` 替换为 `29100`(需要更改 5 处)。
+
+ ```shell
+ kubectl edit ds -n kubesphere-monitoring-system node-exporter
+ ```
+
+ ```shell
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: node-exporter
+ namespace: kubesphere-monitoring-system
+ ...
+ spec:
+ ...
+ template:
+ ...
+ spec:
+ containers:
+ - name: node-exporter
+ image: kubesphere/node-exporter:ks-v0.18.1
+ args:
+ - --web.listen-address=127.0.0.1:9100
+ ...
+ - name: kube-rbac-proxy
+ image: kubesphere/kube-rbac-proxy:v0.4.1
+ args:
+ - --logtostderr
+ - --secure-listen-address=[$(IP)]:9100
+ - --upstream=http://127.0.0.1:9100/
+ ...
+ ports:
+ - containerPort: 9100
+ hostPort: 9100
+ ...
+```
+
+## 与现有的 Prometheus Operator 相冲突
+
+如果你已经自行部署了 Prometheus Operator,请确保在安装 KubeSphere 之前将 Prometheus Operator 删除。否则,可能会出现 KubeSphere 内置的 Prometheus Operator 选择重复的 ServiceMonitor 对象的冲突。
+
+## 如何修改监控数据保留天数
+
+运行下面的命令来编辑最大保留天数。找出 `retention` 字段,并将其更新为所需天数(默认为 7)。
+
+```shell
+kubectl edit prometheuses -n kubesphere-monitoring-system k8s
+```
+
+## kube-scheduler / kube-controller-manager 没有监控数据
+
+首先,请确保标志 `--bind-address` 被设置为 `0.0.0.0`(默认),而不是 `127.0.0.1`。Prometheus 可能需要从其它主机到达这些组件。
+
+随后,请检查 kube-scheduler 和 kube-controller-manager 的端点对象是否存在。如果它们缺失,请通过创建服务选择目标 Pod 手动创建它们。
+
+```shell
+kubectl get ep -n kube-system | grep -E 'kube-scheduler|kube-controller-manager'
+```
+
+## 过去几分钟没有监控数据
+
+请检查您的计算机浏览器本地时钟是否与互联网时间和您的群集同步。时差可能会导致这个问题。特别是当您处于局域网中时,可能会出现这种情况。
+
+## 两个节点和控制平面都没有监控数据
+
+请检查您的网络插件,并确保您的主机和 Pod 网络 CIDR 之间没有 IPPool 重叠。KubeSphere 强烈建议您使用 [KubeKey](https://github.com/kubesphere/kubekey) 安装 Kubernetes。
+
+中国读者可以参考 KubeSphere 中文论坛的[讨论](https://kubesphere.com.cn/forum/d/2027/16)了解更多信息。
+
+## Prometheus 产生错误日志:打开存储失败,没有这样的文件或目录
+
+如果 `kubesphere-monitoring-system` 下的 Prometheus Pod 崩溃并产生以下错误日志,您的 Prometheus 数据可能已经损坏,需要手动删除才能恢复。
+
+```
+level=error ts=2020-10-14T17:43:30.485Z caller=main.go:764 err="opening storage failed: block dir: \"/prometheus/01EM0016F8FB33J63RNHFMHK3\": open /prometheus/01EM0016F8FB33J63RNHFMHK3/meta.json: no such file or directory"
+```
+
+执行进入 Prometheus Pod(如果可能的话),并删除目录 `/prometheus/01EM0016F8FB33J63RNHFMHK3`。
+
+```shell
+kubectl exec -it -n kubesphere-monitoring-system prometheus-k8s-0 -c prometheus sh
+
+rm -rf 01EM0016F8FB33J63RNHFMHK3/
+```
+
+或者您可以简单地从链接到 Prometheus PVC 的持久存储卷中删除目录。
diff --git a/content/zh/docs/pluggable-components/logging.md b/content/zh/docs/pluggable-components/logging.md
index da80c2ca8..0e28c7dbf 100644
--- a/content/zh/docs/pluggable-components/logging.md
+++ b/content/zh/docs/pluggable-components/logging.md
@@ -1,25 +1,25 @@
---
-title: "KubeSphere Logging System"
+title: "KubeSphere 日志系统"
keywords: "Kubernetes, Elasticsearch, KubeSphere, Logging, logs"
-description: "How to Enable KubeSphere Logging System"
+description: "如何启用 KubeSphere 日志系统"
-linkTitle: "KubeSphere Logging System"
+linkTitle: "KubeSphere 日志系统"
weight: 3535
---
-## What is KubeSphere Logging System
+## 什么是 KubeSphere 日志系统
-KubeSphere provides a powerful, holistic and easy-to-use logging system for log collection, query and management. It covers logs at varied levels, including tenants, infrastructure resources, and applications. Users can search logs from different dimensions, such as project, workload, Pod and keyword. Compared with Kibana, the tenant-based logging system of KubeSphere features better isolation and security among tenants as each tenant can only view his or her own logs. Apart from KubeSphere's own logging system, the container platform also allows users to add third-party log collectors, such as Elasticsearch, Kafka and Fluentd.
+KubeSphere 为日志收集、查询和管理提供了一个强大的、整体的、易于使用的日志系统。它涵盖了不同层次的日志,包括租户、基础设施资源和应用。用户可以从项目、工作负载、Pod 和关键字等不同维度对日志进行搜索。与 Kibana 相比,KubeSphere 基于租户的日志系统具有更好的隔离性和租户之间的安全性,因为每个租户只能查看自己的日志。除了 KubeSphere 自身的日志系统,容器平台还允许用户添加第三方日志收集器,如 Elasticsearch、Kafka 和 Fluentd。
-For more information, see Logging, Events and Auditing.
+有关更多信息,请参阅日志、事件和审计系统。
-## Enable Logging before Installation
+## 在安装前启用日志系统
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -27,11 +27,17 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Logging in this mode (e.g. for testing purpose), refer to the following section to see how Logging can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用日志系统(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用日志系统。
{{ notice >}}
-2. In this file, navigate to `logging` and change `false` to `true` for `enabled`. Save the file after you finish.
+{{< notice warning >}}
+
+如果你采用[多节点安装](../../installing-on-linux/introduction/multioverview/),并且使用符号链接作为 Docker 根目录,请确保所有节点遵循完全相同的符号链接。日志代理在 DaemonSet 中部署到节点上。容器日志路径的任何差异都可能导致该节点的收集失败。
+
+{{ notice >}}
+
+2. 在该文件中,搜寻到 `logging`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
logging:
@@ -40,7 +46,7 @@ logging:
{{< notice note >}}
-By default, KubeKey will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in **config-sample.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, KubeKey will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了日志功能,KubeKey 将在内部安装 Elasticsearch。对于生产环境,如果你想启用日志系统,强烈建议你在 **config-sample.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你在安装前提供以下信息,KubeKey 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -55,24 +61,26 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchUrl: # The URL of external Elasticsearch
externalElasticsearchPort: # The port of external Elasticsearch
```
-3. Create a cluster using the configuration file:
+
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Logging, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装日志系统,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Logging, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用日志系统,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `logging` and enable Logging by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `logging`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
logging:
@@ -81,7 +89,7 @@ logging:
{{< notice note >}}
-By default, ks-installer will install Elasticsearch internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in **cluster-configuration.yaml** if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information before installation, ks-installer will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了日志功能,ks-installer 会在内部安装 Elasticsearch。对于生产环境,如果你想启用日志系统,强烈建议你在 **cluster-configuration.yaml** 中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。当你在安装前提供以下信息时,ks-installer 将直接整合你的外部 Elasticsearch,而不是安装内部 Elasticsearch。
{{ notice >}}
@@ -97,31 +105,31 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Logging after Installation
+## 在安装后启用日志系统
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `logging` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `logging`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
logging:
@@ -130,7 +138,7 @@ logging:
{{< notice note >}}
-By default, Elasticsearch will be installed internally if Logging is enabled. For a production environment, it is highly recommended that you set the following value in this yaml file if you want to enable Logging, especially `externalElasticsearchUrl` and `externalElasticsearchPort`. Once you provide the following information, KubeSphere will integrate your external Elasticsearch directly instead of installing an internal one.
+默认情况下,如果启用了日志系统,Elasticsearch 将在内部安装。对于生产环境,如果你想启用日志,强烈建议你在这个 YAML 文件中设置以下值,尤其是 `externalElasticsearchUrl` 和 `externalElasticsearchPort`。一旦你提供了以下信息,KubeSphere 将直接整合你的外部 Elasticsearch,而不是安装一个内部 Elasticsearch。
{{ notice >}}
@@ -146,7 +154,7 @@ es: # Storage backend for logging, tracing, events and auditing.
externalElasticsearchPort: # The port of external Elasticsearch
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -154,31 +162,31 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-Go to **Components** and check the status of Logging. You may see an image as follows:
+进入**服务组件**,检查 Logging 的状态。你可能会看到如下图片:

{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n kubesphere-logging-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/pluggable-components/network-policy.md b/content/zh/docs/pluggable-components/network-policy.md
index d5023d3c6..868e84380 100644
--- a/content/zh/docs/pluggable-components/network-policy.md
+++ b/content/zh/docs/pluggable-components/network-policy.md
@@ -1,32 +1,32 @@
---
-title: "Network Policy"
+title: "网络策略"
keywords: "Kubernetes, KubeSphere, NetworkPolicy"
-description: "How to Enable Network Policy"
+description: "如何启用网络策略"
-linkTitle: "Network Policy"
+linkTitle: "网络策略"
weight: 3547
---
-## What is Network Policy
+## 什么是网络策略
-Starting from v3.0.0, users can configure network policies of native Kubernetes in KubeSphere. Network Policies are an application-centric construct, enabling you to specify how a pod is allowed to communicate with various network entities over the network. With network policies, users can achieve network isolation within the same cluster, which means firewalls can be set up between certain instances (pods).
+从 v3.0.0 开始,用户可以在 KubeSphere 中配置原生 Kubernetes 的网络策略。网络策略是一种以应用为中心的构造,可以让你指定一个 Pod 如何被允许通过网络与各种网络实体进行通信。通过网络策略,用户可以在同一集群内实现网络隔离,这意味着可以在某些实例(Pod)之间设置防火墙。
{{< notice note >}}
-- Please make sure that the CNI network plugin used by the cluster supports Network Policies before you enable it. There are a number of CNI network plugins that support Network Policies, including Calico, Cilium, Kube-router, Romana and Weave Net.
-- It is recommended that you use [Calico](https://www.projectcalico.org/) as the CNI plugin before you enable Network Policy.
+- 在启用之前,请确保集群使用的 CNI 网络插件支持网络策略。支持网络策略的 CNI 网络插件有很多,包括 Calico、Cilium、Kube-router、Romana 和 Weave Net。
+- 建议您在启用网络策略之前,使用 [Calico](https://www.projectcalico.org/) 作为 CNI 插件。
{{ notice >}}
-For more information, see [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
+更多信息请参见[网络策略](https://kubernetes.io/docs/concepts/services-networking/network-policies/)。
-## Enable Network Policy before Installation
+## 在安装前启用网络策略
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -34,73 +34,74 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Network Policy in this mode (e.g. for testing purpose), refer to the following section to see how Network Policy can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用网络策略(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用网络策略。
{{ notice >}}
-2. In this file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `networkpolicy`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
networkpolicy:
enabled: true # Change "false" to "true"
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) for cluster setting. If you want to install Network Policy, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装网络策略,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml). After that, to enable Network Policy, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用网络策略,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `networkpolicy` and enable Network Policy by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `networkpolicy`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
networkpolicy:
enabled: true # Change "false" to "true"
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Network Policy after Installation
+## 在安装后启用网络策略
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `networkpolicy` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `networkpolicy`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
networkpolicy:
enabled: true # Change "false" to "true"
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -108,12 +109,12 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
-If you can see **Network Policies** in **Network** as the image below, it means the installation succeeds as this part won't display until you install the component.
+如果您能在**网络管理**中看到**网络策略**,如下图所示,说明安装成功,因为在安装组件之前,这部分不会显示。
-
\ No newline at end of file
+
diff --git a/content/zh/docs/pluggable-components/overview.md b/content/zh/docs/pluggable-components/overview.md
index cccdb3382..b0e659426 100644
--- a/content/zh/docs/pluggable-components/overview.md
+++ b/content/zh/docs/pluggable-components/overview.md
@@ -1,9 +1,9 @@
---
-title: "Overview"
+title: "概览"
keywords: "Kubernetes, KubeSphere, pluggable-components, overview"
-description: "Overview of installing pluggable components."
+description: "可插拔组件的安装概览"
-linkTitle: "Overview"
+linkTitle: "概览"
weight: 3510
---
diff --git a/content/zh/docs/pluggable-components/service-mesh.md b/content/zh/docs/pluggable-components/service-mesh.md
index 1abff4711..3ade1ed6f 100644
--- a/content/zh/docs/pluggable-components/service-mesh.md
+++ b/content/zh/docs/pluggable-components/service-mesh.md
@@ -1,25 +1,25 @@
---
-title: "KubeSphere Service Mesh"
+title: "KubeSphere 服务网格"
keywords: "Kubernetes, istio, KubeSphere, service-mesh, microservices"
-description: "How to Enable KubeSphere Service Mesh"
+description: "如何启用 KubeSphere 服务网格"
-linkTitle: "KubeSphere Service Mesh"
+linkTitle: "KubeSphere 服务网格"
weight: 3540
---
-## What is KubeSphere Service Mesh
+## 什么是 KubeSphere 服务网格
-On the basis of [Istio](https://istio.io/), KubeSphere Service Mesh visualizes microservices governance and traffic management. It features a powerful toolkit including **circuit breaking, blue-green deployment, canary release, traffic mirroring, distributed tracing, observability and traffic control**. Developers can easily get started with Service Mesh without any code hacking, with the learning curve of Istio greatly reduced. All features of KubeSphere Service Mesh are designed to meet users' demand for their business.
+在 [Istio](https://istio.io/) 的基础上,KubeSphere 服务网格将微服务治理和流量管理可视化。它拥有强大的工具包,包括**断路、蓝绿部署、金丝雀发布、流量镜像、分布式跟踪、可观察性和流量控制**。开发者无需修改任何代码,即可轻松上手服务网格,Istio 的学习曲线大大降低。KubeSphere 服务网格的所有功能都是为了满足用户的业务需求。
-For more information, see related sections in Project Administration and Usage.
+更多信息请参见项目管理与使用中的相关章节。
-## Enable Service Mesh before Installation
+## 在安装前启用服务网格
-### Installing on Linux
+### 在 Linux 上安装
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+当您在 Linux 上安装 KubeSphere 时,你需要创建一个配置文件,该文件列出了所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 基于[在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/) 的教程,您创建了一个默认文件 **config-sample.yaml**。通过执行以下命令修改该文件:
```bash
vi config-sample.yaml
@@ -27,73 +27,74 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable Service Mesh in this mode (e.g. for testing purpose), refer to the following section to see how Service Mesh can be installed after installation.
+如果采用 [All-in-one 安装](../../quick-start/all-in-one-on-linux/),则不需要创建 `config-sample.yaml` 文件,因为可以直接创建集群。一般来说,All-in-one 模式是为那些刚刚接触 KubeSphere 并希望熟悉系统的用户准备的。如果您想在这个模式下启用服务网格(比如出于测试的目的),可以参考下面的部分,看看安装后如何启用服务网格。
{{ notice >}}
-2. In this file, navigate to `servicemesh` and change `false` to `true` for `enabled`. Save the file after you finish.
+2. 在该文件中,搜寻到 `servicemesh`,并将 `enabled` 的 `false` 改为 `true`。完成后保存文件。
```bash
servicemesh:
enabled: true # Change "false" to "true"
```
-3. Create a cluster using the configuration file:
+3. 使用配置文件创建一个集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### **Installing on Kubernetes**
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install Service Mesh, do not use `kubectl apply -f` directly for this file.
+在 Kubernetes 上安装 KubeSphere 时,需要下载文件 [cluster-configuration.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml) 进行集群设置。如果要安装服务网格,不要直接使用 `kubectl apply -f` 对这个文件进行设置。
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable Service Mesh, create a local file cluster-configuration.yaml.
+1. 参照[在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 的教程,先对文件 [kubesphere-installer.yaml](https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml) 执行 `kubectl apply -f`。之后,为了启用服务网格,创建一个本地文件 `cluster-configuration.yaml`。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, navigate to `servicemesh` and enable Service Mesh by changing `false` to `true` for `enabled`. Save the file after you finish.
+2. 将 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 文件中的所有内容复制到刚才创建的本地文件中。
+
+3. 在这个本地 `cluster-configuration.yaml` 文件中,搜寻到 `servicemesh`,并将 `enabled` 的 `false` 改为 `true`,启用它们。完成后保存文件。
```bash
servicemesh:
enabled: true # Change "false" to "true"
```
-4. Execute the following command to start installation:
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-## Enable Service Mesh after Installation
+## 在安装后启用服务网格
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。点击左上角的**平台管理**,选择**集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD**,在搜索栏中输入 `clusterconfiguration`。点击结果查看其详细页面。
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不增加另一个 API 服务器的情况下创建一种新的资源类型。他们可以像其他任何本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在**资源列表**中,点击 `ks-installer` 右边的三个点,选择**编辑 YAML**。
-
+
-4. In this yaml file, navigate to `servicemesh` and change `false` to `true` for `enabled`. After you finish, click **Update** in the bottom-right corner to save the configuration.
+4. 在这个 YAML 文件中,搜寻到 `servicemesh`,将 `enabled` 的 `false` 改为 `true`。完成后,点击右下角的**更新**,保存配置。
```bash
servicemesh:
enabled: true # Change "false" to "true"
```
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web Kubectl 工具来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -101,31 +102,31 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过点击控制台右下角的锤子图标找到 Kubectl 工具。
{{ notice >}}
-## Verify the Installation of Component
+## 验证组件的安装
{{< tabs >}}
-{{< tab "Verify the Component in Dashboard" >}}
+{{< tab "在仪表板中验证组件的安装" >}}
-Go to **Components** and check the status of Istio. You may see an image as follows:
+进入**服务组件**,检查 Istio 的状态。你可能会看到如下图片:

{{ tab >}}
-{{< tab "Verify the Component through kubectl" >}}
+{{< tab "通过 kubectl 验证组件的安装" >}}
-Execute the following command to check the status of pods:
+执行以下命令来检查 Pod 的状态:
```bash
kubectl get pod -n istio-system
```
-The output may look as follows if the component runs successfully:
+如果组件运行成功,输出结果可能如下:
```bash
NAME READY STATUS RESTARTS AGE
diff --git a/content/zh/docs/project-user-guide/_index.md b/content/zh/docs/project-user-guide/_index.md
index cdf705cd9..5116ee5fb 100644
--- a/content/zh/docs/project-user-guide/_index.md
+++ b/content/zh/docs/project-user-guide/_index.md
@@ -1,13 +1,86 @@
---
-title: "Project User Guide"
-description: "Help you to better manage resources in a KubeSphere project"
+title: "项目用户指南"
+description: "帮助您更好地管理 KubeSphere 项目中的资源"
layout: "single"
-linkTitle: "Project User Guide"
+linkTitle: "项目用户指南"
weight: 4300
icon: "/images/docs/docs.svg"
-
---
-TBD
+在 KubeSphere 中,具有必要权限的项目用户能够执行一系列任务,例如创建各种工作负载、配置卷、密钥和配置、设置各种发布策略、监控应用程序指标以及创建告警策略。由于 KubeSphere 具有极大的灵活性和兼容性,无需对原生 Kubernetes 进行任何代码侵入,因此用户可以轻松地开始测试、开发和生产环境所需的任何功能。
+
+## 应用工作负载
+
+### [部署](../project-user-guide/application-workloads/deployments/)
+
+了解部署的基本概念以及如何在 KubeSphere 中创建部署。
+
+### [有状态副本集](../project-user-guide/application-workloads/statefulsets/)
+
+了解 StatefulSet 的基本概念以及如何在 KubeSphere 中创建 StatefulSet。
+
+### [守护进程集](../project-user-guide/application-workloads/daemonsets/)
+
+了解 DaemonSet 的基本概念以及如何在 KubeSphere 中创建 DaemonSet。
+
+### [任务](../project-user-guide/application-workloads/jobs/)
+
+了解 Jobs 的基本概念以及如何在 KubeSphere 中创建 Jobs。
+
+### [定时任务](../project-user-guide/application-workloads/cronjob/)
+
+了解 CronJobs 的基本概念以及如何在 KubeSphere 中创建 CronJobs。
+
+### [服务](../project-user-guide/application-workloads/services/)
+
+了解服务的基本概念以及如何在 KubeSphere 中创建服务。
+
+### [Ingress](../project-user-guide/application-workloads/ingress/)
+
+了解 Ingress 的基本概念(即 Ingress),以及如何在 KubeSphere 中创建 Ingress。
+
+### [容器镜像设置](../project-user-guide/application-workloads/container-image-settings/)
+
+在为工作负载设置容器镜像时,详细了解仪表板上的其他属性。
+
+## 定制应用程序监控
+
+### [介绍](../project-user-guide/custom-application-monitoring/introduction/)
+
+介绍 KubeSphere 自定义监视功能和指标公开,包括公开方法和 ServiceMonitor CRD。
+
+### 开始使用
+
+#### [监控 MySQL](../project-user-guide/custom-application-monitoring/get-started/monitor-mysql/)
+
+部署 MySQL 和 MySQL Exporter 并创建一个仪表板来监视该应用程序。
+
+#### [监控示例 Web 应用](../project-user-guide/custom-application-monitoring/get-started/monitor-sample-web/)
+
+使用 Helm Chart 部署示例 Web 应用程序并创建仪表板以监控该应用程序。
+
+### 可视化
+
+#### [概览](../project-user-guide/custom-application-monitoring/visualization/overview/)
+
+了解创建监控仪表板的一般步骤及其布局。
+
+#### [面板](../project-user-guide/custom-application-monitoring/visualization/panel/)
+
+探索仪表板属性和图表指标。
+
+#### [查询](../project-user-guide/custom-application-monitoring/visualization/querying/)
+
+了解如何指定监控指标。
+
+## 告警
+
+### [告警策略(工作负载级别)](../project-user-guide/alerting/alerting-policy/)
+
+了解如何为工作负载设置告警策略。
+
+### [告警消息(工作负载级别)](../project-user-guide/alerting/alerting-message/)
+
+了解如何查看工作负载的告警策略。
\ No newline at end of file
diff --git a/content/zh/docs/quick-start/all-in-one-on-linux.md b/content/zh/docs/quick-start/all-in-one-on-linux.md
index 7d8c66857..7738075c8 100644
--- a/content/zh/docs/quick-start/all-in-one-on-linux.md
+++ b/content/zh/docs/quick-start/all-in-one-on-linux.md
@@ -1,88 +1,88 @@
---
-title: "All-in-one Installation on Linux"
+title: "Linux 上的 All-in-one 安装"
keywords: 'KubeSphere, Kubernetes, All-in-one, Installation'
description: 'All-in-one Installation on Linux'
-linkTitle: "All-in-one Installation on Linux"
+linkTitle: "在 Linux 上安装 All-in-one KubeSphere"
weight: 3010
---
-For those who are new to KubeSphere and looking for a quick way to discover the platform, the all-in-one mode is your best choice to get started. It features rapid deployment and hassle-free configuration installation with KubeSphere and Kubernetes all provisioned on your machine.
+对于那些刚接触 KubeSphere 的并且想快速上手的用户,all-in-one 安装模式是最佳的选择,它能够帮助您零配置快速部署 KubeSphere 和 Kubernetes。
-## Prerequisites
+## 前提条件
-If your machine is behind a firewall, you need to open relevant ports by following the document [Port Requirements](../../installing-on-linux/introduction/port-firewall/).
+按照文档打开[需要开放的端口](../../installing-on-linux/introduction/port-firewall/)。
-## Step 1: Prepare Linux Machine
+## 步骤 1: 准备 Linux 机器
-See the requirements for hardware and operating system shown below. To get started with all-in-one installation, you only need to prepare one host according to the following requirements.
+请参考下面对机器硬件和操作系统的要求准备一台主机。
-### Hardware Recommendation
+### 建议的机器硬件配置
-| System | Minimum Requirements |
+| 操作系统 | 最低要求 |
| ------------------------------------------------------ | ------------------------------------------- |
-| **Ubuntu** *16.04, 18.04* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
-| **Debian** *Buster, Stretch* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
-| **CentOS** *7*.x | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
-| **Red Hat Enterprise Linux 7** | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
-| **SUSE Linux Enterprise Server 15/openSUSE Leap 15.2** | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
+| **Ubuntu** *16.04, 18.04* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
+| **Debian** *Buster, Stretch* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
+| **CentOS** *7*.x | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
+| **Red Hat Enterprise Linux** *7* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
+| **SUSE Linux Enterprise Server** *15*/**openSUSE Leap** *15.2* | CPU: 2 Cores, Memory: 4 G, Disk Space: 40 G |
{{< notice note >}}
-The system requirements above and the instructions below are for the default minimal installation without any optional components enabled. If your machine has at least 8 cores and 16G memory, it is recommended that you enable all components. For more information, see [Enable Pluggable Components](../../pluggable-components/).
+上面的系统要求和下面的说明适用于没有启用任何可选组件的默认最小安装。如果您的计算机是 8C16G 及以上,则建议启用所有组件。更多信息可以参考 [开启插件](../../pluggable-components/)。
{{ notice >}}
-### Node Requirements
+### 节点的要求
-- The node can be accessed through `SSH`.
-- `sudo`/`curl`/`openssl` should be used.
-- `docker` can be installed by yourself or by KubeKey.
+- 节点必须能够通过 `SSH` 连接。
+- 节点上可以使用 `sudo`/`curl`/`openssl` 命令。
+- 您可以预先自行安装 `docker` 或者通过下面步骤的 [KubeKey](https://github.com/kubesphere/kubekey) 安装。
-{{< notice note >}}
+ {{< notice note >}}
-`docker` must be installed in advance if you want to deploy KubeSphere in an offline environment.
+如果您想离线安装 KubeSphere,那么必须安装好 `docker`。
-{{ notice >}}
+ {{ notice >}}
-### Dependency Requirements
+### 需要安装的依赖项
-KubeKey can install Kubernetes and KubeSphere together. The dependency that needs to be installed may be different based on the Kubernetes version to be installed. You can refer to the list below to see if you need to install relevant dependencies on your node in advance.
+KubeKey 可以将 Kubernetes 和 KubeSphere 一起安装。针对不同的 Kubernetes 版本,需要安装的依赖项可能有所不同。您可以参考下面的列表,查看是否需要提前在节点上安装相关的依赖项。
-| Dependency | Kubernetes Version ≥ 1.18 | Kubernetes Version < 1.18 |
-| ----------- | ------------------------- | ------------------------- |
-| `socat` | Required | Optional but recommended |
-| `conntrack` | Required | Optional but recommended |
-| `ebtables` | Optional but recommended | Optional but recommended |
-| `ipset` | Optional but recommended | Optional but recommended |
+| 依赖项 | Kubernetes 版本 ≥ 1.18 | Kubernetes 版本 < 1.18 |
+| ----------- | ---------------- | ---------------------- |
+| `socat` | 必须 | 可选但建议 |
+| `conntrack` | 必须 | 可选但建议 |
+| `ebtables` | 可选但建议 | 可选但建议 |
+| `ipset` | 可选但建议 | 可选但建议 |
{{< notice info >}}
-Developed in Go language, KubeKey represents a brand-new installation tool as a replacement for the ansible-based installer used before. KubeKey provides users with flexible installation choices, as they can install KubeSphere and Kubernetes separately or install them at one time, which is convenient and efficient.
+KubeKey 是用 Go 语言开发的,是一种全新的安装工具,可以代替以前使用的基于 ansible 的安装程序。KubeKey 为用户提供了灵活的安装选择,可以分别安装 KubeSphere 和 Kubernetes 或二者同时安装,既方便又高效。
{{ notice >}}
-### Network and DNS Requirements
+### 网络和 DNS 配置
-- Make sure the DNS address in `/etc/resolv.conf` is available. Otherwise, it may cause some issues of DNS in clusters.
-- If your network configuration uses Firewall or Security Group, you must ensure infrastructure components can communicate with each other through specific ports. It's recommended that you turn off the firewall or follow the guide [Port Requirements](../../installing-on-linux/introduction/port-firewall/).
+- 必须确保 `/etc/resolv.conf` 中的 DNS 配置是可用的,不然集群中的 DNS 可能会有问题。
+- 如果您的网络配置使用了防火墙或安全组,则必须确保基础组件可以通过特定端口相互通信,可以根据文档[需要开放的端口](../../installing-on-linux/introduction/port-firewall/)的指导将防火墙关闭。
{{< notice tip >}}
-- It is recommended that your OS be clean (without any other software installed). Otherwise, there may be conflicts.
-- It is recommended that a container image mirror (accelerator) be prepared if you have trouble downloading images from dockerhub.io. See [Configure Booster for Installation](../../installing-on-linux/faq/configure-booster/).
+- 建议操作系统处于干净的状态(不安装任何其他软件),否则可能会发生冲突。
+- 如果您无法从 dockerhub.io 下载容器镜像,建议提前准备好容器镜像或者配置镜像加速器。参考 [加速安装的配置](../../installing-on-linux/faq/configure-booster/)。
{{ notice >}}
-## Step 2: Download KubeKey
+## 步骤 2: 下载 KubeKey
-Follow the step below to download KubeKey.
+请按照以下步骤下载 KubeKey。
{{< tabs >}}
-{{< tab "For users with good network connections to GitHub" >}}
+{{< tab "对于访问 GitHub 较快的用户" >}}
-Download KubeKey from [GitHub Release Page](https://github.com/kubesphere/kubekey/releases/tag/v1.0.0) or use the following command directly.
+ 从 [GitHub Release Page](https://github.com/kubesphere/kubekey/releases/tag/v1.0.0) 下载 KubeKey 或者直接使用下面的命令。
```bash
wget https://github.com/kubesphere/kubekey/releases/download/v1.0.0/kubekey-v1.0.0-linux-amd64.tar.gz -O - | tar -xz
@@ -90,9 +90,9 @@ wget https://github.com/kubesphere/kubekey/releases/download/v1.0.0/kubekey-v1.0
{{ tab >}}
-{{< tab "For users with poor network connections to GitHub" >}}
+{{< tab "对于访问 GitHub 速度比较慢的用户" >}}
-Download KubeKey using the following command:
+使用下面的命令下载 KubeKey:
```bash
wget -c https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz -O - | tar -xz
@@ -102,21 +102,21 @@ wget -c https://kubesphere.io/download/kubekey-v1.0.0-linux-amd64.tar.gz -O - |
{{ tabs >}}
-Make `kk` executable:
+为 `kk` 命令添加可执行权限:
```bash
chmod +x kk
```
-## Step 3: Get Started with Installation
+## 步骤 3: 开始安装
-In this QuickStart tutorial, you only need to execute one command for installation, the template of which is shown below:
+在本快速入门教程中,您只需执行一个命令即可进行安装,其模板如下所示:
```bash
./kk create cluster [--with-kubernetes version] [--with-kubesphere version]
```
-Create a Kubernetes cluster with KubeSphere installed. Here is an example for your reference:
+下面示例创建安装了 KubeSphere 的 Kubernetes 集群供您参考:
```bash
./kk create cluster --with-kubernetes v1.17.9 --with-kubesphere v3.0.0
@@ -124,31 +124,31 @@ Create a Kubernetes cluster with KubeSphere installed. Here is an example for yo
{{< notice note >}}
-- Supported Kubernetes versions: *v1.15.12*, *v1.16.13*, *v1.17.9* (default), *v1.18.6*.
-- For all-in-one installation, generally speaking, you do not need to change any configuration.
-- KubeKey will install [OpenEBS](https://openebs.io/) to provision LocalPV for development and testing environment by default, which is convenient for new users. For other storage classes, see [Persistent Storage Configuration](../../installing-on-linux/introduction/storage-configuration/).
+- 支持的 Kubernetes 版本: *v1.15.12*, *v1.16.13*, *v1.17.9* (默认), *v1.18.6*.
+- 一般来说,对于 all-in-one 安装,您无需更改任何配置。
+- KubeKey 会默认安装 [OpenEBS](https://openebs.io/) 为开发和测试环境提供 LocalPV,这对用户来说是非常方便的。对于其它的 storage classes,参考 [持久化存储配置](../../installing-on-linux/introduction/storage-configuration/)。
{{ notice >}}
-After you execute the command, you will see a table as below for environment check.
+执行该命令后,将看到下面的表格,用于环境检查。

-Make sure the above components marked with `y` are installed and input `yes` to continue. For details, read [Node Requirements](#node-requirements) and [Dependency Requirements](#dependency-requirements) above.
+确保安装了上面标有 `y` 的组件,并输入 `yes` 继续。更多细节可以参考上面的[节点的要求](#节点的要求)和[需要安装的依赖项](#需要安装的依赖项)。
-## Step 4: Verify the Installation
+## 步骤 4: 验证安装结果
-When you see the output as below, it means the installation finishes.
+当您看到以下输出时,表明安装已经完成。

-Input the following command to check the result.
+输入以下命令以检查安装结果。
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
-The output displays the IP address and port number of the web console, which is exposed through `NodePort 30880` by default. Now, you can access the console through `EIP:30880` with the default account and password (`admin/P@88w0rd`).
+输出会显示 Web 控制台的 IP 地址和端口号,默认的 NodePort 是 `30880`。现在,您可以使用默认的帐户和密码(`admin/P@88w0rd`)通过 `EIP:30880` 访问控制台。
```bash
#####################################################
@@ -174,17 +174,17 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx
{{< notice note >}}
-You may need to bind EIP and configure port forwarding in your environment for external users to access the console. Besides, make sure the port 30880 is opened in your security groups.
+您可能需要在环境中绑定 `EIP` 并配置端口转发,以供外部用户访问控制台。此外,确保在安全组中打开了 `30880` 端口。
{{ notice >}}
-After logging in the console, you can check the status of different components in **Components**. You may need to wait for some components to be up and running if you want to use related services. You can also use `kubectl get pod --all-namespaces` to inspect the running status of KubeSphere workloads.
+检查完上面的安装日志后,可以到 **Components** 中确认各个组件的安装状态。如果要使用相关服务,可能需要等待某些组件启动并运行。您也可以使用 `kubectl get pod --all-namespaces` 来检查 KubeSphere 相关组件的运行状况。

-## Enable Pluggable Components (Optional)
+## 开启插件 (可选)
-The guide above is used only for minimal installation by default. To enable other components in KubeSphere, see [Enable Pluggable Components](../../pluggable-components/) for more details.
+上面的指南默认情况下仅用于最简单的安装,需要在 KubeSphere 中开启插件,可参考 [开启插件](../../pluggable-components/)。
## Demo
diff --git a/content/zh/docs/quick-start/enable-pluggable-components.md b/content/zh/docs/quick-start/enable-pluggable-components.md
index e8bf3e982..6254b0845 100644
--- a/content/zh/docs/quick-start/enable-pluggable-components.md
+++ b/content/zh/docs/quick-start/enable-pluggable-components.md
@@ -1,44 +1,44 @@
---
-title: "Enable Pluggable Components"
-keywords: 'KubeSphere, Kubernetes, pluggable, components'
-description: 'Enable Pluggable Components'
+title: "启用可插拔功能组件"
+keywords: 'KubeSphere, Kubernetes, 可插拔, 功能组件'
+description: '启用可插拔功能组件'
linkTitle: "Enable Pluggable Components"
weight: 3060
---
-This tutorial demonstrates how to enable pluggable components of KubeSphere both before and after the installation. KubeSphere features ten pluggable components which are listed below.
+本教程演示如何在安装前或安装后启用 KubeSphere 的可插拔组件。KubeSphere 具有以下列出的十个可插拔组件。
-| Configuration Item | Corresponding Component | Description |
+| 配置项 | 功能组件 | 描述 |
| ------------------ | ------------------------------------- | ------------------------------------------------------------ |
-| alerting | KubeSphere alerting system | Enable users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from. |
-| auditing | KubeSphere audit log system | Provide a security-relevant chronological set of records, recording the sequence of activities that happen in the platform, initiated by different tenants. |
-| devops | KubeSphere DevOps system | Provide an out-of-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image and Binary-to-Image. |
-| events | KubeSphere events system | Provide a graphical web console for the exporting, filtering and alerting of Kubernetes events in multi-tenant Kubernetes clusters. |
-| logging | KubeSphere logging system | Provide flexible logging functions for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. |
-| metrics_server | HPA | The Horizontal Pod Autoscaler automatically scales the number of pods based on needs. |
-| networkpolicy | Network policy | Allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). |
-| notification | KubeSphere notification system | Allow users to set `AlertManager` as its sender. Receivers include Email, WeChat Work, and Slack. |
-| openpitrix | KubeSphere App Store | Provide an app store for Helm-based applications and allow users to manage apps throughout the entire lifecycle. |
-| servicemesh | KubeSphere Service Mesh (Istio-based) | Provide fine-grained traffic management, observability and tracing, and visualized traffic topology. |
+| alerting | KubeSphere 告警通知系统 | 使用户能够自定义警报策略,以不同的时间间隔和警报级别及时向接收者发送消息。 |
+| auditing | KubeSphere 审计日志系统 | 按时间顺序记录不同租户在平台中的操作活动。 |
+| devops | KubeSphere DevOps 系统 | 一站式 DevOps 方案,内置 Jenkins 流水线与 B2I & S2I。 |
+| events | KubeSphere 事件系统 | 提供一个图形化的web控制台,用于导出、过滤和警告多租户 Kubernetes 集群中的 Kubernetes 事件。 |
+| logging | KubeSphere 日志系统 | 在统一的控制台中提供灵活的日志查询、收集和管理日志功能。可以添加其他日志收集器,例如Elasticsearch、Kafka 和 Fluentd。 |
+| metrics_server | HPA | 能够根据 pod 数量进行动态伸缩,使运行在上面的服务对指标的变化有一定的自适应能力。 |
+| networkpolicy | 网络策略 | 可以在同一个集群内部之间设置网络策略(比如限制或阻止某些实例 pod 之间的网络请求)。 |
+| notification | KubeSphere 通知系统 | 允许用户将 AlertManager 设置为发件人并发送告警邮件。可以使用的方式有:电子邮件、微信和 Slack。 |
+| openpitrix | KubeSphere 应用商店 | 基于 Helm 的应用程序商店,允许用户在整个生命周期中管理应用程序。 |
+| servicemesh | KubeSphere 服务网格 (基于 Istio) | 支持灰度发布、流量拓扑、流量治理、Tracing。 |
-For more information about each component, see Overview of Enable Pluggable Components.
+有关每个组件的更多信息,请参见启用可插拔组件概述。
{{< notice note >}}
-- If you use KubeKey to install KubeSphere on Linux, by default, the above components are not enabled except `metrics_server`. However, `metrics_server` remains disabled in the installer if you install KubeSphere on existing Kubernetes clusters. This is because the component may already be installed in your environment, especially for cloud-hosted Kubernetes clusters.
-- `multicluster` is not covered in this tutorial. If you want to enable this feature, you need to set a corresponding value for `clusterRole`. For more information, see [Multi-cluster Management](../../multicluster-management/).
-- Make sure your machine meets the hardware requirements before the installation. Here is the recommendation if you want to enable all pluggable components: CPU ≥ 8 Cores, Memory ≥ 16 G, Disk Space ≥ 100 G.
+- 如果您使用 KubeKey 在 Linux 上安装 KubeSphere,默认情况下,除了 `metrics_server` 之外,不会启用上述组件。但是,如果在现有的 Kubernetes 集群上安装 KubeSphere,则安装程序中 `metrics_server` 仍处于禁用状态。这是因为组件可能已经安装在您的环境中,特别是对于云托管的 Kubernetes 集群。
+- `multicluster` 不在本教程中介绍。如果要启用此功能,则需要为 `clusterRole` 设置相应的值。有关详细信息,请参见[多集群管理](../../multicluster-management/)。
+- 在安装前,请确保您的机器符合硬件要求。如果您想启用所有的可拔插组件,建议您的机器配置如下: CPU ≥ 8 Cores, 内存 ≥ 16 G, 磁盘空间 ≥ 100 G。
{{ notice >}}
-## Enable Pluggable Components before Installation
+## 安装前启用可插拔组件
-### **Installing on Linux**
+### **在 Linux 上安装**
-When you install KubeSphere on Linux, you need to create a configuration file, which lists all KubeSphere components.
+在 Linux 上安装 KubeSphere 时,需要创建一个配置文件,该文件列出所有 KubeSphere 组件。
-1. In the tutorial of [Installing KubeSphere on Linux](../../installing-on-linux/introduction/multioverview/), you create a default file **config-sample.yaml**. Modify the file by executing the following command:
+1. 在教程 [在 Linux 上安装 KubeSphere](../../installing-on-linux/introduction/multioverview/), 你可以创建一个默认配置文件: **config-sample.yaml**。通过执行以下命令来修改文件:
```bash
vi config-sample.yaml
@@ -46,66 +46,65 @@ vi config-sample.yaml
{{< notice note >}}
-If you adopt [All-in-one Installation](../../quick-start/all-in-one-on-linux/), you do not need to create a config-sample.yaml file as you can create a cluster directly. Generally, the all-in-one mode is for users who are new to KubeSphere and look to get familiar with the system. If you want to enable pluggable components in this mode (e.g. for testing purpose), refer to the following section to see how pluggable components can be installed after installation.
+如果您采用 [All-in-one 模式安装](../../quick-start/all-in-one-on-linux/), 您无需创建 config-sample.yaml 文件,因为 all-in-one 模式可以直接创建集群。 通常,all-in-one 模式适用于刚接触 KubeSphere 并希望熟悉该系统的用户。 如果要在此模式下启用可插拔组件(例如,出于测试目的),请参阅以下部分,了解如何在安装后安装可插拔组件。
{{ notice >}}
-2. In this file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [an example file](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md) for your reference. Save the file after you finish.
-3. Create a cluster using the configuration file:
+2. 在此文件中, 将 `enabled` 的值 `false` 改为 `true`。 这是 [示例文件](https://github.com/kubesphere/kubekey/blob/master/docs/config-example.md) 供您参考。修改完成后保存文件。
+3. 使用配置文件创建集群:
```bash
./kk create cluster -f config-sample.yaml
```
-### Installing on Kubernetes
+### 在 Kubernetes 上安装
-When you install KubeSphere on Kubernetes, you need to download the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) for cluster setting. If you want to install pluggable components, do not use `kubectl apply -f` directly for this file.
-
-1. In the tutorial of [Installing KubeSphere on Kubernetes](../../installing-on-kubernetes/introduction/overview/), you execute `kubectl apply -f` first for the file [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml). After that, to enable pluggable components, create a local file cluster-configuration.yaml.
+在 Kubernetes 上安装 KubeSphere 时, 需要下载文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 用于集群设置。如果要安装可插拔组件, 请不要直接使用 `kubectl apply -f` 来应用配置。
+1. [在 Kubernetes 上安装 KubeSphere](../../installing-on-kubernetes/introduction/overview/) 教程中, 您首先通过命令 `kubectl apply -f` 应用配置文件 [kubesphere-installer.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/kubesphere-installer.yaml)。 之后,要启用可插拔组件,请创建本地文件cluster-configuration.yaml。
```bash
vi cluster-configuration.yaml
```
-2. Copy all the content in the file [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) and paste it to the local file just created.
-3. In this local cluster-configuration.yaml file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. Here is [an example file](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) for your reference. Save the file after you finish.
-4. Execute the following command to start installation:
+2. 复制文件 [cluster-configuration.yaml](https://github.com/kubesphere/ks-installer/releases/download/v3.0.0/cluster-configuration.yaml) 中所有的内容并将其粘贴到刚创建的本地文件中。
+3. 在这个本地文件 cluster-configuration.yaml 中, 将 `enabled` 的值 `false` 改为 `true` 。 [示例文件](https://github.com/kubesphere/ks-installer/blob/master/deploy/cluster-configuration.yaml) 供您参考。 编辑完成后请保存文件。
+4. 执行以下命令开始安装:
```bash
kubectl apply -f cluster-configuration.yaml
```
-Whether you install KubeSphere on Linux or on Kubernetes, you can check the status of the components you have enabled in the web console of KubeSphere after installation. Go to **Components**, and you can see an image below:
+无论是在Linux上还是在Kubernetes上安装KubeSphere,安装后都可以在KubeSphere的Web控制台中检查已启用组件的状态。转到 **服务组件**, 您可以看到以下图片:
-
+
-## Enable Pluggable Components after Installation
+## 安装后启用可插拔组件
-KubeSphere web console provides a convenient way for users to view and operate on different resources. To enable pluggable components after installation, you only need to make few adjustments in the console directly. For those who are accustomed to the Kubernetes command-line tool, kubectl, they will have no difficulty in using KubeSphere as the tool is integrated into the console.
+KubeSphere Web 控制台使用户更方便来查看和使用不同的资源。 要在安装后启用可插拔组件,只需要在控制台中直接进行一些调整。 对于那些习惯使用 Kubernetes 命令行工具 kubectl 的人来说,由于该工具已集成到控制台中,因此使用 KubeSphere 将毫无困难。
-1. Log in the console as `admin`. Click **Platform** in the top-left corner and select **Clusters Management**.
+1. 以 `admin` 身份登录控制台。 点击左上角的 **平台管理** ,然后选择 **集群管理**。
-
+
-2. Click **CRDs** and enter `clusterconfiguration` in the search bar. Click the result to view its detailed page.
+2. 点击 **自定义资源 CRD** 然后在搜索栏中输入 `clusterconfiguration` 单击结果以查看其详细页面。
-
+
{{< notice info >}}
-A Custom Resource Definition (CRD) allows users to create a new type of resources without adding another API server. They can use these resources like any other native Kubernetes objects.
+自定义资源定义(CRD)允许用户在不添加其他 API 服务器的情况下创建新类型的资源。它们可以像其他本地 Kubernetes 对象一样使用这些资源。
{{ notice >}}
-3. In **Resource List**, click the three dots on the right of `ks-installer` and select **Edit YAML**.
+3. 在 **资源列表** 中, 点击 `ks-installer` 右侧的三个点,然后选择右侧的 **编辑配置文件**。
-
+
-4. In this yaml file, enable the pluggable components you want to install by changing `false` to `true` for `enabled`. After you finish, click **Update** to save the configuration.
+4. 在配置文件中, 将 `enabled` 的 `false` 更改为 `true` ,以启用要安装的可插拔组件。 完成后,单击 **更新** 以保存配置。
-
+
-5. You can use the web kubectl to check the installation process by executing the following command:
+5. 您可以通过执行以下命令,使用 Web kubectl 来检查安装过程:
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
@@ -113,11 +112,11 @@ kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=
{{< notice tip >}}
-You can find the web kubectl tool by clicking the hammer icon in the bottom-right corner of the console.
+您可以通过单击控制台右下角的锤子图标来找到 web kubectl 工具。
{{ notice >}}
-6. The output will display a message as below if the component is successfully installed.
+6. 如果组件安装成功,输出将显示以下消息。
```bash
#####################################################
@@ -141,12 +140,12 @@ https://kubesphere.io 20xx-xx-xx xx:xx:xx
#####################################################
```
-7. In **Components**, you can see the status of different components.
+7. 在 **服务组件** 中, 您可以查看不同组件的状态。
-
+
{{< notice tip >}}
-If you do not see relevant components in the above image, some pods may not be ready yet. You can execute `kubectl get pod --all-namespaces` through kubectl to see the status of pods.
+如果您在上图中看不到相关组件,可能是一些 pod 尚未准备好。 您可以通过 kubectl 执行 `kubectl get pod --all-namespaces` 来查看 pod 的状态。
{{ notice >}}
diff --git a/content/zh/docs/quick-start/wordpress-deployment.md b/content/zh/docs/quick-start/wordpress-deployment.md
index b1586be59..d648cbb0e 100644
--- a/content/zh/docs/quick-start/wordpress-deployment.md
+++ b/content/zh/docs/quick-start/wordpress-deployment.md
@@ -1,199 +1,198 @@
---
-title: "Compose and Deploy Wordpress"
+title: "创建 Wordpress 应用并发布至 Kubernetes"
keywords: 'KubeSphere, Kubernetes, app, Wordpress'
-description: 'Compose and deploy Wordpress.'
+description: '创建 Wordpress 应用并发布至 Kubernetes。'
linkTitle: "Compose and Deploy Wordpress"
weight: 3050
---
-## WordPress Introduction
+## WordPress 简介
-WordPress is a free and open-source content management system written in PHP, allowing users to build their own websites. A complete Wordpress application includes the following Kubernetes objects with MySQL serving as the backend database.
+WordPress 是使用 PHP 语言开发的内容管理系统软件,用户可以在支持 PHP 和 MySQL 数据库的服务器上使用自己的博客,一个完整的 Wordpress 应用程序包括以下 Kubernetes 对象。
-
+
-## Objective
+## 目的
-This tutorial demonstrates how to create an application (WordPress as an example) in KubeSphere and access it outside the cluster.
+本教程演示如何在 KubeSphere 中创建应用程序(以 WordPress 为例)并在集群外访问它。
-## Prerequisites
+## 准备工作
-An account `project-regular` is needed with the role `operator` assigned in one of your projects (the user has been invited to the project). For more information, see [Create Workspace, Project, Account and Role](../create-workspace-and-project/).
+需要一个 `project-regular` 帐户,并在其中一个项目中分配角色 `operator`(该用户已被邀请参加该项目)。有关详细信息,请参见[创建企业空间、项目、帐户和角色](../create-workspace-and-project/)。
-## Estimated Time
+## 预计操作时间
-About 15 minutes.
+大约15分钟。
-## Hands-on Lab
+## 动手操作
-### Task 1: Create Secrets
+### 任务 1: 创建密钥
-#### Create a MySQL Secret
+#### 创建 MySQL 密钥
-The environment variable `WORDPRESS_DB_PASSWORD` is the password to connect to the database in WordPress. In this step, you need to create a ConfigMap to store the environment variable that will be used in MySQL pod template.
+环境变量 `WORDPRESS_DB_PASSWORD` 是连接 WordPress 数据库的密码。在这一步中,您需要创建一个密钥(Secret)来存储将在 MySQL pod 模板中使用的环境变量。
-1. Log in KubeSphere console using the account `project-regular`. Go to the detailed page of `demo-project` and navigate to **Configurations**. In **Secrets**, click **Create** on the right.
+1. 使用 `project-regular` 帐户登录 KubeSphere 控制台。转到 `demo-project` 的详细页面并导航到 **配置**。在 **密钥** 中,单击右侧的 **创建**。
-
+
-2. Enter the basic information (e.g. name it `mysql-secret`) and click **Next**. In the next page, select **Default** for **Type** and click **Add Data** to add a key-value pair. Input the Key (`MYSQL_ROOT_PASSWORD`) and Value (`123456`) as below and click `√` in the bottom-right corner to confirm. When you finish, click **Create** to continue.
+2. 输入基本信息 (例如,将其命名为 `mysql-secret`) ,然后单击 **下一步**。在下一页中, 选择 **类型** 为 **默认** ,然后单击 **添加数据** 来添加一个键值对。 输入键(Key) `MYSQL_ROOT_PASSWORD` 和值(Value) `123456` 单击右下角 `√` 的确认按钮, 完成后,单击 **创建** 按钮并继续.
-
+
-#### Create a WordPress Secret
+#### 创建 WordPress 密钥
-Follow the same steps above to create a WordPress secret `wordpress-secret` with the key `WORDPRESS_DB_PASSWORD` and value `123456`. Secrets created display in the list as below:
+按照上面创建 MySQL 密钥相同的步骤创建一个名字为 `wordpress-secret` 的密钥, 输入键(Key) `WORDPRESS_DB_PASSWORD` 和值(Value) `123456`,创建的密钥显示在列表中,如下所示:
-
+
-### Task 2: Create a Volume
+### 任务 2: 创建存储卷
-1. Go to **Volumes** under **Storage** and click **Create**.
+1. 点击 **存储管理** 下面的 **存储卷** 并单击 **创建**。
-
+
-2. Enter the basic information of the volume (e.g. name it `wordpress-pvc`) and click **Next**.
-3. In **Volume Settings**, you need to choose an available **Storage Class**, and set **Access Mode** and **Volume Capacity**. You can use the default value directly as shown below. Click **Next** to continue.
+2. 输入卷的基本信息 (例如,将其命名为 `wordpress-pvc`) ,然后单击 **下一步**。
+3. 在 **存储卷设置**, 您需要选择一个可用的 **存储类型**, 并设置 **访问模式** 和 **存储卷容量**。您可以直接使用默认值,如下所示。单击 **下一步** 继续。
-
+
-4. For **Advanced Settings**, you do not need to add extra information for this task and click **Create** to finish.
+4. 对于 **高级设置**,您不需要为当前操作设置额外的配置,单击 **创建** 即可完成。
-### Task 3: Create an Application
+### 任务 3: 创建应用程序
-#### Add MySQL backend component
+#### 添加 MySQL 后端组件
-1. Navigate to **Applications** under **Application Workloads**, select **Composing App** and click **Create Composing Application**.
+1. 导航到 **应用负载** 下的 **应用**, 选择 **自制应用** 然后单击 **构建自制应用**。
-
+
-2. Enter the basic information (e.g. input `wordpress` for Application Name) and click **Next**.
+2. 输入基本信息 (例如在应用程序名称中输入 `wordpress`) 然后单击 **下一步**。
-
+
-3. In **Components**, click **Add Service** to set a component in the app.
+3. 在 **服务组件** 中, 单击 **添加服务** 在应用程序中设置组件。
-
+
-4. Define a service type for the component. Select **Stateful Service** here.
-5. Enter the name for the stateful service (e.g. **mysql**) and click **Next**.
+4. 设置组件的服务类型,此处选择 **有状态服务**。
+5. 输入有状态服务的名称 (例如 **mysql**) ,然后单击 **下一步**。
-
+
-6. In **Container Image**, click **Add Container Image**.
+6. 在 **容器镜像** 中, 单击 **添加容器镜像**。
-
+
-7. Enter `mysql:5.6` in the search box, press **Enter** and click **Use Default Ports**. After that, do not click `√` in the bottom-right corner as the setting is not finished yet.
+7. 在镜像搜索框中输入 `mysql:5.6` , 单击 **回车键** 然后单击 **使用默认端口**。 配置还没有设置完成,请您不要点击右下角的 `√` 按钮。
-
+
{{< notice note >}}
-In **Advanced Settings**, make sure the memory limit is no less than 1000 Mi or MySQL may fail to start due to a lack of memory.
+在 **高级设置** 中, 请确保内存限制不小于 1000 Mi,否则 MySQL 可能因内存不足而无法启动。
{{ notice >}}
-8. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. Input the name `MYSQL_ROOT_PASSWORD` and choose the resource `mysql-secret` and the key `MYSQL_ROOT_PASSWORD` created in the previous step. Click `√` after you finish and **Next** to continue.
+8. 向下滚动到 **环境变量** 然后单击 **引用配置文件或密匙**。 输入名称 `MYSQL_ROOT_PASSWORD` 然后选择资源 `mysql-secret` 和前面步骤中创建的密钥 `MYSQL_ROOT_PASSWORD` 完成后单击 `√` 保存配置 ,最后单击 **下一步** 继续。
-
+
-9. Select **Add Volume Template** in **Mount Volumes**. Input the value of **Volume Name** (`mysql`) and **Mount Path** (mode: `ReadAndWrite`, path: `/var/lib/mysql`) as below:
+9. 选择 **挂载存储** 中的 **添加存储卷**。 输入 **存储卷名称** (`mysql`) 和 **存储类型** (类型: `读写`, 路径: `/var/lib/mysql`)的值,如下所示:
-
+
-Click `√` after you finish and click **Next** to continue.
+完成后单击 `√` 保存设置并单击 **下一步** 继续。
-10. In **Advanced Settings**, you can click **Add** directly or select other options based on your needs.
+10. 在 **高级设置** 中, 不需要设置,您可以直接点击 **添加** ,也可以根据需要选择其他选项。
-
+
-11. At this point, the MySQL component has beed added as shown below:
+11. 现在,您添加了MySQL组件,如下所示:
-
+
-#### Add WordPress frontend component
+#### 添加 WordPress 前端组件
-12. Click **Add Service** again and select **Stateless Service** this time. Enter the name `wordpress` and click Next.
+12. 点击 **添加服务** 选择 **无状态服务** 输入名称 `wordpress` 然后单击 **下一步**。
-
+
-13. Similar to the step above, click **Add Container Image**, enter `wordpress:4.8-apache` in the search box, press **Enter** and click **Use Default Ports**.
+13. 与上述步骤类似,单击 **添加容器镜像**, 在搜索框中输入 `wordpress:4.8-apache` 镜像, 按 **回车** ,然后单击 **使用默认端口**。
-
+
-14. Scroll down to **Environment Variables** and click **Use ConfigMap or Secret**. Two environment variables need to be added here. Enter the values according to the screenshot below.
-
-- For `WORDPRESS_DB_PASSWORD`, choose `wordpress-secret` and `WORDPRESS_DB_PASSWORD` created in Task 1.
-- Click **Add Environment Variable**, and enter `WORDPRESS_DB_HOST` and `mysql` for the key and value.
+14. 向下滚动到 **环境变量** 然后单击 **引用配置文件或密匙**,这里需要添加两个环境变量。根据下面的屏幕截图输入值。
+- 对于 `WORDPRESS_DB_PASSWORD`, 请选择在任务 1 中创建的 `wordpress-secret` 和 `WORDPRESS_DB_PASSWORD`。
+- 单击 **添加环境变量**, 输入 `WORDPRESS_DB_HOST` 和 `mysql` 作为键(Key)和值(Value)。
{{< notice warning >}}
-For the second environment variable added here, the value must be exactly the same as the name you set for MySQL in step 5. Otherwise, Wordpress cannot connect to the corresponding database of MySQL.
+对于此处添加的第二个环境变量,该值必须与步骤 5 中为 MySQL 设置的名称完全相同。否则,Wordpress 将无法连接到 MySQL 对应的数据库。
{{ notice >}}
-
+
-Click `√` to save it and **Next** to continue.
+单击 `√` 保存配置,单击 **下一步** 继续。
-15. In **Mount Volumes**, click **Add Volume** and select **Choose an existing volume**.
+15. 在 **挂载存储** 设置中, 单击 **添加存储卷** 然后单击 **选择已有存储卷**。
-
+
-
+
-16. Select `wordpress-pvc` created in the previous step, set the mode as `ReadAndWrite`, and input `/var/www/html` as its mount path. Click `√` to save it and **Next** to continue.
+16. 选择上一步创建的 `wordpress-pvc`, 将模式设置为 `读写`,并输入装载路径 `/var/www/html` 。 单击 `√` 保存并单击 **下一步** 继续。
-
+
-17. In **Advanced Settings**, you can click **Add** directly or select other options based on your needs.
+17. 在 **高级设置** 中, 您可以直接点击 **添加** 创建服务,也可以根据需要选择其他选项。
-
+
-18. The frontend component is also set now. Click **Next** to continue.
+18. 现在也设置了前端组件。单击 **下一步** 继续。
-
+
-19. You can set route rules (Ingress) here or click **Create** directly.
+19. 您可以在这里设置路由规则(入口),也可以直接单击 **创建** 来创建应用程序。
-
+
-20. The app will display in the list below after you create it.
+20. 创建后,应用程序将显示在下面的列表中。
-
+
-### Task 4: Verify the Resources
+### 任务 4: 验证资源
-In **Workloads**, check the status of `wordpress-v1` and `mysql-v1` in **Deployments** and **StatefulSets** respectively. If they are running as shown in the image below, it means WordPress has been created successfully.
+在 **工作负载** 中,分别检查 **部署** 和 **有状态副本集** 中的 `wordpress-v1` 和 `mysql-v1` 的状态。 如果他们运行如下图所示,这意味着 WordPress 已经成功创建。
-
+
-
+
-### Task 5: Access WordPress through NodePort
+### 任务 5: 通过 NodePort 访问 WordPress
-1. To access the service outside the cluster, navigate to **Services** first. Click the three dots on the right of `wordpress` and select **Edit Internet Access**.
+1. 要访问集群外的服务,请首先导航到 **服务** 。 单击 `wordpress` 右侧的三个点后选择 **编辑外网访问**。
-
+
-2. Select `NodePort` for **Access Method** and click **OK**.
+2. 在 **访问方式** 中选择 `NodePort`, 然后单击 **确定**。
-
+
-3. Click the service and you can see the port exposed.
+3. 单击服务,您可以看到暴露的端口。
-
+
-4. Access this application via `{Node IP}:{NodePort}` and you can see an image as below:
+4. 通过 `{Node IP}:{NodePort}` 访问此应用程序,您可以看到下图:
-
+
{{< notice note >}}
-Make sure the port is opened in your security groups before you access the service.
+在访问服务之前,请确保安全组中的端口已打开。
{{ notice >}}
\ No newline at end of file
diff --git a/content/zh/partner/_index.md b/content/zh/partner/_index.md
index 11c7f024c..99e75420d 100644
--- a/content/zh/partner/_index.md
+++ b/content/zh/partner/_index.md
@@ -26,7 +26,7 @@ section3:
tip: Apply now →
partnerType:
- title: "App Providers"
- content: "KubeSphere Application Store is a great place to showcase your applications. KubeSphere brings your applications to tens of thousands of users, allowing them to deploy your App to Kubernetes with one click."
+ content: "KubeSphere App Store is a great place to showcase your applications. KubeSphere brings your applications to tens of thousands of users, allowing them to deploy your App to Kubernetes with one click."
link: "request"
- title: "Technology"
diff --git a/content/zh/reason/_index.md b/content/zh/reason/_index.md
deleted file mode 100644
index e9ce638d8..000000000
--- a/content/zh/reason/_index.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: "reason"
-
----
\ No newline at end of file
diff --git a/data/en/footer.yaml b/data/en/footer.yaml
index 008b5e7f1..b22b4664c 100644
--- a/data/en/footer.yaml
+++ b/data/en/footer.yaml
@@ -36,13 +36,13 @@ footer:
- title: KubeSphere Docs
list:
- content: Introduction
- link: 'https://kubesphere.io/docs/'
+ link: '../../../docs/introduction/what-is-kubesphere/'
- content: Installation
- link: 'https://kubesphere.io/docs/installation/intro/'
+ link: '../../../docs/quick-start/all-in-one-on-linux/'
- content: Tutorial
- link: 'https://kubesphere.io/docs/quick-start/admin-quick-start/'
+ link: '../../../docs/quick-start/create-workspace-and-project/'
- content: API Documentation
- link: 'https://kubesphere.io/docs/api-reference/api-docs/'
+ link: '../../../docs/api-reference/api-docs/'
- title: Community
list:
diff --git a/data/en/video.json b/data/en/video.json
index c9da6c5f4..af8238c7a 100644
--- a/data/en/video.json
+++ b/data/en/video.json
@@ -16,7 +16,7 @@
"tag": ""
},
{
- "title": "Helm-based Application Management in KubeSphere Application Store",
+ "title": "Helm-based Application Management in KubeSphere App Store",
"link": "https://www.youtube.com/embed/h1Mmcr4wnF8?&autoplay=1",
"createTime": "2020.4.25",
"snapshot": "https://pek3b.qingstor.com/kubesphere-docs/png/20200619153336.png",
diff --git a/data/zh/footer.yaml b/data/zh/footer.yaml
index 8da8469f4..53503c3d1 100644
--- a/data/zh/footer.yaml
+++ b/data/zh/footer.yaml
@@ -37,13 +37,13 @@ footer:
- title: 文档中心
list:
- content: 产品介绍
- link: 'https://kubesphere.io/docs/'
+ link: '../../../zh/docs/introduction/what-is-kubesphere/'
- content: 如何安装
- link: 'https://kubesphere.io/docs/installation/intro/'
+ link: '../../../zh/docs/quick-start/all-in-one-on-linux/'
- content: 快速入门
- link: 'https://kubesphere.io/docs/quick-start/admin-quick-start/'
+ link: '../../../zh/docs/quick-start/create-workspace-and-project/'
- content: API 文档
- link: 'https://kubesphere.io/docs/api-reference/api-docs/'
+      link: '../../../zh/docs/api-reference/api-docs/'
- title: 开源社区
list:
diff --git a/i18n/zh.yaml b/i18n/zh.yaml
index e1fbbf2da..b3c70b844 100644
--- a/i18n/zh.yaml
+++ b/i18n/zh.yaml
@@ -71,7 +71,7 @@
- id: share
translation: 分享
- id: note
- translation: 注解
+ translation: 备注
- id: tip
translation: 提示
- id: info
diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html
index 9ede4c316..463d380c3 100644
--- a/layouts/_default/baseof.html
+++ b/layouts/_default/baseof.html
@@ -5,6 +5,12 @@
+{{ if .Site.Params.addGoogleTag }}
+
+
+
+{{ end }}
{{- partial "header.html" . -}}
{{- block "main" . }}{{- end }}
diff --git a/layouts/api/single.html b/layouts/api/single.html
new file mode 100644
index 000000000..8d56f8246
--- /dev/null
+++ b/layouts/api/single.html
@@ -0,0 +1,26 @@
+
+
+
+
+
+ {{ with .Params.Title }}{{ . }}{{ else }}{{ with .Site.Params.title }}{{ . }}{{ end }}{{ end }}
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/layouts/docs/single.html b/layouts/docs/single.html
index ec9d703f9..fb343d06a 100644
--- a/layouts/docs/single.html
+++ b/layouts/docs/single.html
@@ -5,6 +5,12 @@
+{{ if .Site.Params.addGoogleTag }}
+
+
+
+{{ end }}
{{- partial "header.html" . -}}
diff --git a/layouts/forum/list.html b/layouts/forum/list.html
deleted file mode 100644
index ab437fffc..000000000
--- a/layouts/forum/list.html
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-
-
- {{ .Title }}
-
-
-内容
-
-
\ No newline at end of file
diff --git a/layouts/partials/content.html b/layouts/partials/content.html
index fefc76c2e..bae02b3be 100644
--- a/layouts/partials/content.html
+++ b/layouts/partials/content.html
@@ -15,6 +15,12 @@
+{{ if .Site.Params.addGoogleTag }}
+
+
+
+{{ end }}
{{ partial "header.html" $context }}
diff --git a/layouts/partials/head.html b/layouts/partials/head.html
index e2922dcc3..e2d444fd6 100644
--- a/layouts/partials/head.html
+++ b/layouts/partials/head.html
@@ -24,3 +24,30 @@
{{ end }}
+
+{{ if .Site.Params.addGoogleAnalytics }}
+
+
+{{ end }}
+
+{{ if .Site.Params.addGoogleTag }}
+
+
+
+
+
+{{ end }}
+
+
+
diff --git a/layouts/reason/list.html b/layouts/reason/list.html
deleted file mode 100644
index e88c14c25..000000000
--- a/layouts/reason/list.html
+++ /dev/null
@@ -1,6 +0,0 @@
-{{ define "main" }}
-
-
-
内容
-
-{{ end }}
\ No newline at end of file
diff --git a/layouts/trends/list.html b/layouts/trends/list.html
deleted file mode 100644
index c01276cf5..000000000
--- a/layouts/trends/list.html
+++ /dev/null
@@ -1,6 +0,0 @@
-{{ define "main" }}
-
-
-
内容
-
-{{ end }}
\ No newline at end of file
diff --git a/static/files/application-templates/nginx-0.1.0.tgz b/static/files/application-templates/nginx-0.1.0.tgz
new file mode 100644
index 000000000..e699df31b
Binary files /dev/null and b/static/files/application-templates/nginx-0.1.0.tgz differ
diff --git a/static/images/ApiDocs.svg b/static/images/ApiDocs.svg
new file mode 100644
index 000000000..899b4251e
--- /dev/null
+++ b/static/images/ApiDocs.svg
@@ -0,0 +1,26 @@
+
+
\ No newline at end of file
diff --git a/static/images/application-templates/20190717152831.png b/static/images/application-templates/20190717152831.png
new file mode 100644
index 000000000..7950ef965
Binary files /dev/null and b/static/images/application-templates/20190717152831.png differ
diff --git a/static/images/application-templates/20190717152929.png b/static/images/application-templates/20190717152929.png
new file mode 100644
index 000000000..3e5184aa3
Binary files /dev/null and b/static/images/application-templates/20190717152929.png differ
diff --git a/static/images/application-templates/20200106161804.png b/static/images/application-templates/20200106161804.png
new file mode 100644
index 000000000..b4bf897a3
Binary files /dev/null and b/static/images/application-templates/20200106161804.png differ
diff --git a/static/images/application-templates/20200106162219.png b/static/images/application-templates/20200106162219.png
new file mode 100644
index 000000000..19894f7bf
Binary files /dev/null and b/static/images/application-templates/20200106162219.png differ
diff --git a/static/images/application-templates/20200106171747.png b/static/images/application-templates/20200106171747.png
new file mode 100644
index 000000000..d47582f62
Binary files /dev/null and b/static/images/application-templates/20200106171747.png differ
diff --git a/static/images/application-templates/20200106172151.png b/static/images/application-templates/20200106172151.png
new file mode 100644
index 000000000..62dc3fde6
Binary files /dev/null and b/static/images/application-templates/20200106172151.png differ
diff --git a/static/images/application-templates/20200106172416.png b/static/images/application-templates/20200106172416.png
new file mode 100644
index 000000000..d8b29698c
Binary files /dev/null and b/static/images/application-templates/20200106172416.png differ
diff --git a/static/images/application-templates/20200106172532.png b/static/images/application-templates/20200106172532.png
new file mode 100644
index 000000000..548f68a57
Binary files /dev/null and b/static/images/application-templates/20200106172532.png differ
diff --git a/static/images/application-templates/20200106172837.png b/static/images/application-templates/20200106172837.png
new file mode 100644
index 000000000..7fa80e046
Binary files /dev/null and b/static/images/application-templates/20200106172837.png differ
diff --git a/static/images/application-templates/20200106173434.png b/static/images/application-templates/20200106173434.png
new file mode 100644
index 000000000..38bbeb22d
Binary files /dev/null and b/static/images/application-templates/20200106173434.png differ
diff --git a/static/images/application-templates/20200106173531.png b/static/images/application-templates/20200106173531.png
new file mode 100644
index 000000000..e960c5b95
Binary files /dev/null and b/static/images/application-templates/20200106173531.png differ
diff --git a/static/images/application-templates/20201028180736.png b/static/images/application-templates/20201028180736.png
new file mode 100644
index 000000000..5e9d3d91c
Binary files /dev/null and b/static/images/application-templates/20201028180736.png differ
diff --git a/static/images/application-templates/20201028180853.png b/static/images/application-templates/20201028180853.png
new file mode 100644
index 000000000..9b14989cc
Binary files /dev/null and b/static/images/application-templates/20201028180853.png differ
diff --git a/static/images/application-templates/20201028181426.png b/static/images/application-templates/20201028181426.png
new file mode 100644
index 000000000..50dba4025
Binary files /dev/null and b/static/images/application-templates/20201028181426.png differ
diff --git a/static/images/application-templates/20201028181614.png b/static/images/application-templates/20201028181614.png
new file mode 100644
index 000000000..4be740816
Binary files /dev/null and b/static/images/application-templates/20201028181614.png differ
diff --git a/static/images/application-templates/20201028181834.png b/static/images/application-templates/20201028181834.png
new file mode 100644
index 000000000..196b71f35
Binary files /dev/null and b/static/images/application-templates/20201028181834.png differ
diff --git a/static/images/application-templates/20201028181957.png b/static/images/application-templates/20201028181957.png
new file mode 100644
index 000000000..c13011d34
Binary files /dev/null and b/static/images/application-templates/20201028181957.png differ
diff --git a/static/images/application-templates/20201028182251.png b/static/images/application-templates/20201028182251.png
new file mode 100644
index 000000000..1e1ba2d84
Binary files /dev/null and b/static/images/application-templates/20201028182251.png differ
diff --git a/static/images/application-templates/20201109150224.png b/static/images/application-templates/20201109150224.png
new file mode 100644
index 000000000..821fbd6ba
Binary files /dev/null and b/static/images/application-templates/20201109150224.png differ
diff --git a/static/images/application-templates/add-repo.png b/static/images/application-templates/add-repo.png
new file mode 100644
index 000000000..a268b10ef
Binary files /dev/null and b/static/images/application-templates/add-repo.png differ
diff --git a/static/images/application-templates/aduit-records.png b/static/images/application-templates/aduit-records.png
new file mode 100644
index 000000000..65fe9edbc
Binary files /dev/null and b/static/images/application-templates/aduit-records.png differ
diff --git a/static/images/application-templates/app-deploy.png b/static/images/application-templates/app-deploy.png
new file mode 100644
index 000000000..ae5ed4edd
Binary files /dev/null and b/static/images/application-templates/app-deploy.png differ
diff --git a/static/images/application-templates/app-detail-test-deploy.png b/static/images/application-templates/app-detail-test-deploy.png
new file mode 100644
index 000000000..a1fb0f45e
Binary files /dev/null and b/static/images/application-templates/app-detail-test-deploy.png differ
diff --git a/static/images/application-templates/app-list.png b/static/images/application-templates/app-list.png
new file mode 100644
index 000000000..6672152b4
Binary files /dev/null and b/static/images/application-templates/app-list.png differ
diff --git a/static/images/application-templates/app-portal.png b/static/images/application-templates/app-portal.png
new file mode 100644
index 000000000..fce8db6a0
Binary files /dev/null and b/static/images/application-templates/app-portal.png differ
diff --git a/static/images/application-templates/app-review.png b/static/images/application-templates/app-review.png
new file mode 100644
index 000000000..ab77ed303
Binary files /dev/null and b/static/images/application-templates/app-review.png differ
diff --git a/static/images/application-templates/app-store.png b/static/images/application-templates/app-store.png
new file mode 100644
index 000000000..ee73207b4
Binary files /dev/null and b/static/images/application-templates/app-store.png differ
diff --git a/static/images/application-templates/app-template-en.png b/static/images/application-templates/app-template-en.png
new file mode 100644
index 000000000..97ba428c8
Binary files /dev/null and b/static/images/application-templates/app-template-en.png differ
diff --git a/static/images/application-templates/app-template-zn.png b/static/images/application-templates/app-template-zn.png
new file mode 100644
index 000000000..7376597b0
Binary files /dev/null and b/static/images/application-templates/app-template-zn.png differ
diff --git a/static/images/application-templates/app-templates-draft.png b/static/images/application-templates/app-templates-draft.png
new file mode 100644
index 000000000..6a7808afc
Binary files /dev/null and b/static/images/application-templates/app-templates-draft.png differ
diff --git a/static/images/application-templates/app-templates.png b/static/images/application-templates/app-templates.png
new file mode 100644
index 000000000..06041f5cb
Binary files /dev/null and b/static/images/application-templates/app-templates.png differ
diff --git a/static/images/application-templates/choose-new-application.png b/static/images/application-templates/choose-new-application.png
new file mode 100644
index 000000000..2a36441d8
Binary files /dev/null and b/static/images/application-templates/choose-new-application.png differ
diff --git a/static/images/application-templates/create-accounts.png b/static/images/application-templates/create-accounts.png
new file mode 100644
index 000000000..890eaaf14
Binary files /dev/null and b/static/images/application-templates/create-accounts.png differ
diff --git a/static/images/application-templates/create-app-repo.png b/static/images/application-templates/create-app-repo.png
new file mode 100644
index 000000000..3d9f02d98
Binary files /dev/null and b/static/images/application-templates/create-app-repo.png differ
diff --git a/static/images/application-templates/create-app.png b/static/images/application-templates/create-app.png
new file mode 100644
index 000000000..c95bfbee7
Binary files /dev/null and b/static/images/application-templates/create-app.png differ
diff --git a/static/images/application-templates/create-roles.png b/static/images/application-templates/create-roles.png
new file mode 100644
index 000000000..54b8249d4
Binary files /dev/null and b/static/images/application-templates/create-roles.png differ
diff --git a/static/images/application-templates/deploy-emqx.png b/static/images/application-templates/deploy-emqx.png
new file mode 100644
index 000000000..123724121
Binary files /dev/null and b/static/images/application-templates/deploy-emqx.png differ
diff --git a/static/images/application-templates/deploy-instance.png b/static/images/application-templates/deploy-instance.png
new file mode 100644
index 000000000..4ca43d542
Binary files /dev/null and b/static/images/application-templates/deploy-instance.png differ
diff --git a/static/images/application-templates/deploy-new-application.png b/static/images/application-templates/deploy-new-application.png
new file mode 100644
index 000000000..9844a37b5
Binary files /dev/null and b/static/images/application-templates/deploy-new-application.png differ
diff --git a/static/images/application-templates/e.png b/static/images/application-templates/e.png
new file mode 100644
index 000000000..f4a99d9f6
Binary files /dev/null and b/static/images/application-templates/e.png differ
diff --git a/static/images/application-templates/edit-app-info.png b/static/images/application-templates/edit-app-info.png
new file mode 100644
index 000000000..5d4644327
Binary files /dev/null and b/static/images/application-templates/edit-app-info.png differ
diff --git a/static/images/application-templates/edit-template.png b/static/images/application-templates/edit-template.png
new file mode 100644
index 000000000..5d4644327
Binary files /dev/null and b/static/images/application-templates/edit-template.png differ
diff --git a/static/images/application-templates/emqx-active.png b/static/images/application-templates/emqx-active.png
new file mode 100644
index 000000000..22f8d2c7a
Binary files /dev/null and b/static/images/application-templates/emqx-active.png differ
diff --git a/static/images/application-templates/emqx-new-version.png b/static/images/application-templates/emqx-new-version.png
new file mode 100644
index 000000000..850922388
Binary files /dev/null and b/static/images/application-templates/emqx-new-version.png differ
diff --git a/static/images/application-templates/emqx.png b/static/images/application-templates/emqx.png
new file mode 100644
index 000000000..d77e25249
Binary files /dev/null and b/static/images/application-templates/emqx.png differ
diff --git a/static/images/application-templates/iot-category.png b/static/images/application-templates/iot-category.png
new file mode 100644
index 000000000..8f65eaa73
Binary files /dev/null and b/static/images/application-templates/iot-category.png differ
diff --git a/static/images/application-templates/iot-emqx.png b/static/images/application-templates/iot-emqx.png
new file mode 100644
index 000000000..b282cfa97
Binary files /dev/null and b/static/images/application-templates/iot-emqx.png differ
diff --git a/static/images/application-templates/release-app.png b/static/images/application-templates/release-app.png
new file mode 100644
index 000000000..3043674c5
Binary files /dev/null and b/static/images/application-templates/release-app.png differ
diff --git a/static/images/application-templates/review.png b/static/images/application-templates/review.png
new file mode 100644
index 000000000..d22ea343c
Binary files /dev/null and b/static/images/application-templates/review.png differ
diff --git a/static/images/application-templates/select-deploy-location.png b/static/images/application-templates/select-deploy-location.png
new file mode 100644
index 000000000..5f9970559
Binary files /dev/null and b/static/images/application-templates/select-deploy-location.png differ
diff --git a/static/images/application-templates/submit-new-version.png b/static/images/application-templates/submit-new-version.png
new file mode 100644
index 000000000..86aeb21d1
Binary files /dev/null and b/static/images/application-templates/submit-new-version.png differ
diff --git a/static/images/application-templates/submit-review.png b/static/images/application-templates/submit-review.png
new file mode 100644
index 000000000..a21eac416
Binary files /dev/null and b/static/images/application-templates/submit-review.png differ
diff --git a/static/images/application-templates/submitted.png b/static/images/application-templates/submitted.png
new file mode 100644
index 000000000..a6074c85e
Binary files /dev/null and b/static/images/application-templates/submitted.png differ
diff --git a/static/images/application-templates/test-deploy.png b/static/images/application-templates/test-deploy.png
new file mode 100644
index 000000000..f7beb7a41
Binary files /dev/null and b/static/images/application-templates/test-deploy.png differ
diff --git a/static/images/application-templates/upload-app1.png b/static/images/application-templates/upload-app1.png
new file mode 100644
index 000000000..55317738f
Binary files /dev/null and b/static/images/application-templates/upload-app1.png differ
diff --git a/static/images/application-templates/upload-app2.png b/static/images/application-templates/upload-app2.png
new file mode 100644
index 000000000..2991db4e8
Binary files /dev/null and b/static/images/application-templates/upload-app2.png differ
diff --git a/static/images/application-templates/upload-app3.png b/static/images/application-templates/upload-app3.png
new file mode 100644
index 000000000..a67a348d8
Binary files /dev/null and b/static/images/application-templates/upload-app3.png differ
diff --git a/static/images/application-templates/upload-app4.png b/static/images/application-templates/upload-app4.png
new file mode 100644
index 000000000..7adac0cab
Binary files /dev/null and b/static/images/application-templates/upload-app4.png differ
diff --git a/static/images/application-templates/upload-emqx-new-version.png b/static/images/application-templates/upload-emqx-new-version.png
new file mode 100644
index 000000000..8a86b8bc4
Binary files /dev/null and b/static/images/application-templates/upload-emqx-new-version.png differ
diff --git a/static/images/application-templates/upload-icons.png b/static/images/application-templates/upload-icons.png
new file mode 100644
index 000000000..6bf3f0743
Binary files /dev/null and b/static/images/application-templates/upload-icons.png differ
diff --git a/static/images/application-templates/upload-templates.png b/static/images/application-templates/upload-templates.png
new file mode 100644
index 000000000..a7474b13e
Binary files /dev/null and b/static/images/application-templates/upload-templates.png differ
diff --git a/static/images/application-templates/validate-repo.png b/static/images/application-templates/validate-repo.png
new file mode 100644
index 000000000..ab08cf3c8
Binary files /dev/null and b/static/images/application-templates/validate-repo.png differ
diff --git a/static/images/application-templates/validate-repo2.png b/static/images/application-templates/validate-repo2.png
new file mode 100644
index 000000000..f9e689e10
Binary files /dev/null and b/static/images/application-templates/validate-repo2.png differ
diff --git a/static/images/devops/20200107230822.png b/static/images/devops/20200107230822.png
deleted file mode 100644
index b4d57205c..000000000
Binary files a/static/images/devops/20200107230822.png and /dev/null differ
diff --git a/static/images/devops/20200107232100.png b/static/images/devops/20200107232100.png
deleted file mode 100644
index 046427e8a..000000000
Binary files a/static/images/devops/20200107232100.png and /dev/null differ
diff --git a/static/images/devops/20200108134653.png b/static/images/devops/20200108134653.png
deleted file mode 100644
index d004e44e9..000000000
Binary files a/static/images/devops/20200108134653.png and /dev/null differ
diff --git a/static/images/devops/create-dockerhub-id.png b/static/images/devops/create-dockerhub-id.png
new file mode 100644
index 000000000..c926d3b8d
Binary files /dev/null and b/static/images/devops/create-dockerhub-id.png differ
diff --git a/static/images/devops/create-kubeconfig.png b/static/images/devops/create-kubeconfig.png
new file mode 100644
index 000000000..fd74c1119
Binary files /dev/null and b/static/images/devops/create-kubeconfig.png differ
diff --git a/static/images/devops/devops-go-ok.png b/static/images/devops/devops-go-ok.png
new file mode 100644
index 000000000..3e686ec86
Binary files /dev/null and b/static/images/devops/devops-go-ok.png differ
diff --git a/static/images/devops/dockerhub-create-token.png b/static/images/devops/dockerhub-create-token.png
new file mode 100644
index 000000000..6c77244d4
Binary files /dev/null and b/static/images/devops/dockerhub-create-token.png differ
diff --git a/static/images/devops/dockerhub-settings.png b/static/images/devops/dockerhub-settings.png
new file mode 100644
index 000000000..c3f2842b4
Binary files /dev/null and b/static/images/devops/dockerhub-settings.png differ
diff --git a/static/images/devops/dockerhub-token-copy.png b/static/images/devops/dockerhub-token-copy.png
new file mode 100644
index 000000000..097ba1506
Binary files /dev/null and b/static/images/devops/dockerhub-token-copy.png differ
diff --git a/static/images/devops/dockerhub-token-ok.png b/static/images/devops/dockerhub-token-ok.png
new file mode 100644
index 000000000..6945124de
Binary files /dev/null and b/static/images/devops/dockerhub-token-ok.png differ
diff --git a/static/images/devops/edit-jenkinsfile.png b/static/images/devops/edit-jenkinsfile.png
new file mode 100644
index 000000000..cb62136db
Binary files /dev/null and b/static/images/devops/edit-jenkinsfile.png differ
diff --git a/static/images/devops/jenkins-edit-1.png b/static/images/devops/jenkins-edit-1.png
deleted file mode 100644
index 2ce517dc9..000000000
Binary files a/static/images/devops/jenkins-edit-1.png and /dev/null differ
diff --git a/static/images/devops/jenkins-edit-2.png b/static/images/devops/jenkins-edit-2.png
deleted file mode 100644
index afa54784e..000000000
Binary files a/static/images/devops/jenkins-edit-2.png and /dev/null differ
diff --git a/static/images/devops/maven-project-jenkins.png b/static/images/devops/maven-project-jenkins.png
new file mode 100644
index 000000000..8741ff959
Binary files /dev/null and b/static/images/devops/maven-project-jenkins.png differ
diff --git a/static/images/devops/run-maven-pipeline.png b/static/images/devops/run-maven-pipeline.png
new file mode 100644
index 000000000..5d473374b
Binary files /dev/null and b/static/images/devops/run-maven-pipeline.png differ
diff --git a/static/images/devops/service-view.png b/static/images/devops/service-view.png
deleted file mode 100644
index 558ff0b41..000000000
Binary files a/static/images/devops/service-view.png and /dev/null differ
diff --git a/static/images/devops/view-credential-list.png b/static/images/devops/view-credential-list.png
new file mode 100644
index 000000000..b123b0756
Binary files /dev/null and b/static/images/devops/view-credential-list.png differ
diff --git a/static/images/devops/view-edit-jenkinsfile.png b/static/images/devops/view-edit-jenkinsfile.png
new file mode 100644
index 000000000..c11f1a5e5
Binary files /dev/null and b/static/images/devops/view-edit-jenkinsfile.png differ
diff --git a/static/images/devops/view-namespace.png b/static/images/devops/view-namespace.png
new file mode 100644
index 000000000..25de900c7
Binary files /dev/null and b/static/images/devops/view-namespace.png differ
diff --git a/static/images/devops/view-result-maven-pipeline.png b/static/images/devops/view-result-maven-pipeline.png
new file mode 100644
index 000000000..854987762
Binary files /dev/null and b/static/images/devops/view-result-maven-pipeline.png differ
diff --git a/static/images/devops/view-result-maven-workload-svc.png b/static/images/devops/view-result-maven-workload-svc.png
new file mode 100644
index 000000000..706378f01
Binary files /dev/null and b/static/images/devops/view-result-maven-workload-svc.png differ
diff --git a/static/images/devops/view-result-maven-workload.png b/static/images/devops/view-result-maven-workload.png
new file mode 100644
index 000000000..729d98b48
Binary files /dev/null and b/static/images/devops/view-result-maven-workload.png differ
diff --git a/static/images/docs/access-control-and-account-management/github1.png b/static/images/docs/access-control-and-account-management/github1.png
new file mode 100644
index 000000000..de4c5f9e1
Binary files /dev/null and b/static/images/docs/access-control-and-account-management/github1.png differ
diff --git a/static/images/docs/access-control-and-account-management/github2.png b/static/images/docs/access-control-and-account-management/github2.png
new file mode 100644
index 000000000..24dbbe616
Binary files /dev/null and b/static/images/docs/access-control-and-account-management/github2.png differ
diff --git a/static/images/docs/access-control-and-account-management/github3.png b/static/images/docs/access-control-and-account-management/github3.png
new file mode 100644
index 000000000..08c745a54
Binary files /dev/null and b/static/images/docs/access-control-and-account-management/github3.png differ
diff --git a/static/images/docs/access-control-and-account-management/github4.png b/static/images/docs/access-control-and-account-management/github4.png
new file mode 100644
index 000000000..94e91a086
Binary files /dev/null and b/static/images/docs/access-control-and-account-management/github4.png differ
diff --git a/static/images/docs/access-control-and-account-management/oauth2.svg b/static/images/docs/access-control-and-account-management/oauth2.svg
new file mode 100644
index 000000000..b7cb295e5
--- /dev/null
+++ b/static/images/docs/access-control-and-account-management/oauth2.svg
@@ -0,0 +1,27 @@
+
\ No newline at end of file
diff --git a/static/images/docs/alerting/mail_server_config-zh.png b/static/images/docs/alerting/mail_server_config-zh.png
new file mode 100644
index 000000000..d68a000d3
Binary files /dev/null and b/static/images/docs/alerting/mail_server_config-zh.png differ
diff --git a/static/images/docs/alerting/mail_server_guide-zh.png b/static/images/docs/alerting/mail_server_guide-zh.png
new file mode 100644
index 000000000..de4294108
Binary files /dev/null and b/static/images/docs/alerting/mail_server_guide-zh.png differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.jpg
new file mode 100644
index 000000000..cfc4256de
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ04.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.jpg
new file mode 100644
index 000000000..a85da7de6
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitMQ11.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.jpg
new file mode 100644
index 000000000..3f8f13c92
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitma-dashboard-detail.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.jpg
new file mode 100644
index 000000000..6ac99b505
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq-dashboard.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.jpg
new file mode 100644
index 000000000..0c2bd2125
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq01.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.jpg
new file mode 100644
index 000000000..2ab4d2735
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq02.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.jpg
new file mode 100644
index 000000000..c88a5e90d
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq021.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.jpg
new file mode 100644
index 000000000..801fe0af1
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq03.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.jpg
new file mode 100644
index 000000000..e06810853
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq05.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.jpg
new file mode 100644
index 000000000..9dc5447a7
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq06.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.jpg
new file mode 100644
index 000000000..c003c3e54
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq07.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.jpg
new file mode 100644
index 000000000..c452dda12
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq08.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.jpg b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.jpg
new file mode 100644
index 000000000..1950d0ad0
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/rabbitmq-app/rabbitmq09.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/access-tomcat-browser.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/access-tomcat-browser.jpg
new file mode 100644
index 000000000..b26472eeb
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/access-tomcat-browser.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.jpg
new file mode 100644
index 000000000..359047378
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/click-deploy.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.jpg
new file mode 100644
index 000000000..bcd077843
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/click-next.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.jpg
new file mode 100644
index 000000000..fe1f601a5
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/click-tomcat-service.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.jpg
new file mode 100644
index 000000000..33412cd60
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/deploy-tomcat.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.jpg
new file mode 100644
index 000000000..b6c7b24f8
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/edit-internet-access.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.jpg
new file mode 100644
index 000000000..bf3ef25b0
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/exposed-port.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.jpg
new file mode 100644
index 000000000..457643d86
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/find-tomcat.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.jpg
new file mode 100644
index 000000000..850db616b
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/nodeport.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.jpg
new file mode 100644
index 000000000..cc76c3bf3
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-app01.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.jpg
new file mode 100644
index 000000000..dd48e0dba
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-running.jpg differ
diff --git a/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.jpg
new file mode 100644
index 000000000..662f1c05a
Binary files /dev/null and b/static/images/docs/appstore/built-in-apps/tomcat-app/tomcat-teminal-icon.jpg differ
diff --git a/static/images/docs/tomcat-app/tomcat-app10.jpg b/static/images/docs/appstore/built-in-apps/tomcat-app/view-project.jpg
similarity index 100%
rename from static/images/docs/tomcat-app/tomcat-app10.jpg
rename to static/images/docs/appstore/built-in-apps/tomcat-app/view-project.jpg
diff --git a/static/images/docs/appstore/memcached/choose_memcached_from_app_store.png b/static/images/docs/appstore/memcached/choose_memcached_from_app_store.png
new file mode 100644
index 000000000..872c8a8cb
Binary files /dev/null and b/static/images/docs/appstore/memcached/choose_memcached_from_app_store.png differ
diff --git a/static/images/docs/appstore/memcached/connect_memcached.png b/static/images/docs/appstore/memcached/connect_memcached.png
new file mode 100644
index 000000000..b2b57f511
Binary files /dev/null and b/static/images/docs/appstore/memcached/connect_memcached.png differ
diff --git a/static/images/docs/appstore/memcached/deploy_memcached.png b/static/images/docs/appstore/memcached/deploy_memcached.png
new file mode 100644
index 000000000..d230ac35c
Binary files /dev/null and b/static/images/docs/appstore/memcached/deploy_memcached.png differ
diff --git a/static/images/docs/appstore/memcached/deploy_memcached_confirm.png b/static/images/docs/appstore/memcached/deploy_memcached_confirm.png
new file mode 100644
index 000000000..4ed8c14d6
Binary files /dev/null and b/static/images/docs/appstore/memcached/deploy_memcached_confirm.png differ
diff --git a/static/images/docs/appstore/memcached/memcached_active.png b/static/images/docs/appstore/memcached/memcached_active.png
new file mode 100644
index 000000000..f204d76ee
Binary files /dev/null and b/static/images/docs/appstore/memcached/memcached_active.png differ
diff --git a/static/images/docs/appstore/memcached/view_memcached_workload.png b/static/images/docs/appstore/memcached/view_memcached_workload.png
new file mode 100644
index 000000000..e49812d0f
Binary files /dev/null and b/static/images/docs/appstore/memcached/view_memcached_workload.png differ
diff --git a/static/images/docs/appstore/postgresql/choose_postgresql_from_app_store.png b/static/images/docs/appstore/postgresql/choose_postgresql_from_app_store.png
new file mode 100644
index 000000000..d5081a34a
Binary files /dev/null and b/static/images/docs/appstore/postgresql/choose_postgresql_from_app_store.png differ
diff --git a/static/images/docs/appstore/postgresql/connect_postgresql.png b/static/images/docs/appstore/postgresql/connect_postgresql.png
new file mode 100644
index 000000000..2ca024d91
Binary files /dev/null and b/static/images/docs/appstore/postgresql/connect_postgresql.png differ
diff --git a/static/images/docs/appstore/postgresql/deploy_postgresql.png b/static/images/docs/appstore/postgresql/deploy_postgresql.png
new file mode 100644
index 000000000..3ae377c94
Binary files /dev/null and b/static/images/docs/appstore/postgresql/deploy_postgresql.png differ
diff --git a/static/images/docs/appstore/postgresql/deploy_postgresql_confirm.png b/static/images/docs/appstore/postgresql/deploy_postgresql_confirm.png
new file mode 100644
index 000000000..4f4f18f7f
Binary files /dev/null and b/static/images/docs/appstore/postgresql/deploy_postgresql_confirm.png differ
diff --git a/static/images/docs/appstore/postgresql/expose_postgresql_service.png b/static/images/docs/appstore/postgresql/expose_postgresql_service.png
new file mode 100644
index 000000000..d7f8aed0a
Binary files /dev/null and b/static/images/docs/appstore/postgresql/expose_postgresql_service.png differ
diff --git a/static/images/docs/appstore/postgresql/get_postgresql_secret.png b/static/images/docs/appstore/postgresql/get_postgresql_secret.png
new file mode 100644
index 000000000..69dce3b92
Binary files /dev/null and b/static/images/docs/appstore/postgresql/get_postgresql_secret.png differ
diff --git a/static/images/docs/appstore/postgresql/postgresql_active.png b/static/images/docs/appstore/postgresql/postgresql_active.png
new file mode 100644
index 000000000..d7b7a82a2
Binary files /dev/null and b/static/images/docs/appstore/postgresql/postgresql_active.png differ
diff --git a/static/images/docs/appstore/postgresql/view_postgresql_service.png b/static/images/docs/appstore/postgresql/view_postgresql_service.png
new file mode 100644
index 000000000..554ce17a6
Binary files /dev/null and b/static/images/docs/appstore/postgresql/view_postgresql_service.png differ
diff --git a/static/images/docs/cluster-administration/cluster-status-monitoring-zh/platform.png b/static/images/docs/cluster-administration/cluster-status-monitoring-zh/platform.png
deleted file mode 100644
index a9c183314..000000000
Binary files a/static/images/docs/cluster-administration/cluster-status-monitoring-zh/platform.png and /dev/null differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/clusters-management-select.jpg b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/clusters-management-select.jpg
new file mode 100644
index 000000000..af7192622
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/clusters-management-select.jpg differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-basic-info.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-basic-info.png
new file mode 100644
index 000000000..426a07fc4
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-basic-info.png differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png
new file mode 100644
index 000000000..7b6ed59d5
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-settings.png differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png
new file mode 100644
index 000000000..f74a7988f
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/create-storage-class-storage-system.png differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.jpg b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.jpg
new file mode 100644
index 000000000..2bbb0207c
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/custom-storage-class.jpg differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg
new file mode 100644
index 000000000..1719018e1
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-class.jpg differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png
new file mode 100644
index 000000000..71ec77725
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-system.png differ
diff --git a/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.jpg b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.jpg
new file mode 100644
index 000000000..778db73e2
Binary files /dev/null and b/static/images/docs/cluster-administration/persistent-volume-and-storage-class/storage-volume-qingcloud.jpg differ
diff --git a/static/images/docs/copy.png b/static/images/docs/copy.png
new file mode 100644
index 000000000..615e8b53d
Binary files /dev/null and b/static/images/docs/copy.png differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/activity-failure.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/activity-failure.jpg
new file mode 100644
index 000000000..c2f7fedb4
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/activity-failure.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg
new file mode 100644
index 000000000..2491ae0d0
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/advanced-setting.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg
new file mode 100644
index 000000000..ec5efe2e9
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/branch-settings.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg
new file mode 100644
index 000000000..7220e6326
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/commit-changes.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.jpg
new file mode 100644
index 000000000..8696bb46f
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg
new file mode 100644
index 000000000..462e8e9d9
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/create-pipeline.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/credential-list.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/credential-list.jpg
new file mode 100644
index 000000000..2db5edfa7
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/credential-list.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg
new file mode 100644
index 000000000..d3dbfe740
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/fork-github-repo.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.jpg
new file mode 100644
index 000000000..84cb9871e
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-1.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.jpg
new file mode 100644
index 000000000..693031b3c
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/generate-github-token-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.jpg
new file mode 100644
index 000000000..78be5f1be
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-1.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg
new file mode 100644
index 000000000..eae1fc456
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/inspect-pipeline-log-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.jpg
new file mode 100644
index 000000000..d41f40cb0
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-1.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.jpg
new file mode 100644
index 000000000..8ea2fadb3
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkins-edit-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg
new file mode 100644
index 000000000..5c2a2a533
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/jenkinsfile-online.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.jpg
new file mode 100644
index 000000000..dfc24089d
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-detail.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-list.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-list.jpg
new file mode 100644
index 000000000..5e85d2262
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-list.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.jpg
new file mode 100644
index 000000000..52bb7828a
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/pipeline-proceed.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/project-list.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/project-list.jpg
new file mode 100644
index 000000000..5803c5287
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/project-list.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.jpg
new file mode 100644
index 000000000..a404c0fe7
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/remove-behavioral-strategy.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/select-repo.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/select-repo.jpg
new file mode 100644
index 000000000..94d02fb14
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/select-repo.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg
new file mode 100644
index 000000000..b2aa50c68
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/sonar-token.jpg differ
diff --git a/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg
new file mode 100644
index 000000000..cef06029d
Binary files /dev/null and b/static/images/docs/devops-user-guide/create-a-pipeline-using-a-jenkinsfile/tag-name.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg
new file mode 100644
index 000000000..228025f22
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/copied-jenkins.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.jpg
new file mode 100644
index 000000000..f0f23e7a0
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-dockerhub-id.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg
new file mode 100644
index 000000000..0b8cb75bf
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-kubeconfig.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.jpg
new file mode 100644
index 000000000..842dcbaf1
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.jpg
new file mode 100644
index 000000000..17c8ff60a
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/create-pipeline.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg
new file mode 100644
index 000000000..781b9ddb3
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/credential-docker-create.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg
new file mode 100644
index 000000000..3ee96852b
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-1.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg
new file mode 100644
index 000000000..4ce1e0a2d
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/docker-image-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg
new file mode 100644
index 000000000..0b08d9e0e
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-create-token.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg
new file mode 100644
index 000000000..f3bb5c305
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-settings.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg
new file mode 100644
index 000000000..10205c4cd
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-copy.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg
new file mode 100644
index 000000000..20670b3a0
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/dockerhub-token-ok.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.jpg
new file mode 100644
index 000000000..7bc5d0375
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/edit-jenkinsfile.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png
new file mode 100644
index 000000000..33a019dd0
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/multi-cluster-ok.png differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg
new file mode 100644
index 000000000..7b4382dc8
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/pipeline-running.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg
new file mode 100644
index 000000000..ef324bfa1
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/run-pipeline.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.jpg
new file mode 100644
index 000000000..38cdc3e59
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/set-pipeline-name.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg
new file mode 100644
index 000000000..7b447f341
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-multi-cluster-project/view-deployments.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg
new file mode 100644
index 000000000..228025f22
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/copied-jenkins.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.jpg
new file mode 100644
index 000000000..f0f23e7a0
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-dockerhub-id.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg
new file mode 100644
index 000000000..0b8cb75bf
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-kubeconfig.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.jpg
new file mode 100644
index 000000000..842dcbaf1
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.jpg
new file mode 100644
index 000000000..17c8ff60a
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/create-pipeline.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg
new file mode 100644
index 000000000..781b9ddb3
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/credential-docker-create.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg
new file mode 100644
index 000000000..3ee96852b
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-1.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg
new file mode 100644
index 000000000..4ce1e0a2d
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/docker-image-2.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg
new file mode 100644
index 000000000..0b08d9e0e
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-create-token.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg
new file mode 100644
index 000000000..f3bb5c305
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-settings.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg
new file mode 100644
index 000000000..10205c4cd
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-copy.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg
new file mode 100644
index 000000000..20670b3a0
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/dockerhub-token-ok.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.jpg
new file mode 100644
index 000000000..7bc5d0375
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/edit-jenkinsfile.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.jpg
new file mode 100644
index 000000000..7b4382dc8
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/pipeline-running.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.jpg
new file mode 100644
index 000000000..ef324bfa1
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/run-pipeline.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.jpg
new file mode 100644
index 000000000..38cdc3e59
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/set-pipeline-name.jpg differ
diff --git a/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.jpg b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.jpg
new file mode 100644
index 000000000..7b447f341
Binary files /dev/null and b/static/images/docs/devops-user-guide/examples/compile-and-deploy-a-go-project/view-deployments.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/access-endpoint.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/access-endpoint.jpg
new file mode 100644
index 000000000..1e4b0c880
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/access-endpoint.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/devops-prod.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/devops-prod.jpg
new file mode 100644
index 000000000..834cf2484
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/devops-prod.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/docker-hub-result.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/docker-hub-result.jpg
new file mode 100644
index 000000000..cc41884e8
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/docker-hub-result.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/github-result.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/github-result.jpg
new file mode 100644
index 000000000..232e779da
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/github-result.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/pipeline-deployments.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/pipeline-deployments.jpg
new file mode 100644
index 000000000..a96d38cb2
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/pipeline-deployments.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sample-app-result-check.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sample-app-result-check.jpg
new file mode 100644
index 000000000..af0f93a22
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sample-app-result-check.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail-1.jpg.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail-1.jpg.jpg
new file mode 100644
index 000000000..1fa37c5a7
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail-1.jpg.jpg differ
diff --git a/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail.jpg b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail.jpg
new file mode 100644
index 000000000..e3e078b22
Binary files /dev/null and b/static/images/docs/devops-user-guide/integrate-sonarqube-into-pipeline/sonarqube-result-detail.jpg differ
diff --git a/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/access-key.jpg b/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/access-key.jpg
new file mode 100644
index 000000000..f6f22d881
Binary files /dev/null and b/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/access-key.jpg differ
diff --git a/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/storage-zone.jpg b/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/storage-zone.jpg
new file mode 100644
index 000000000..235209a0d
Binary files /dev/null and b/static/images/docs/installing-on-linux/introduction/persistent-storage-configuration/storage-zone.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-0.png b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-0.png
new file mode 100644
index 000000000..1c2f50241
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-0.png differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-1.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-1.jpg
new file mode 100644
index 000000000..9612de6ad
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-1.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-2.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-2.jpg
new file mode 100644
index 000000000..d244b0253
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-2.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-3.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-3.jpg
new file mode 100644
index 000000000..03197f7a0
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-3.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.jpg
new file mode 100644
index 000000000..cbd266a00
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-4.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-5.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-5.jpg
new file mode 100644
index 000000000..f8dab844f
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-5.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.jpg
new file mode 100644
index 000000000..c0ce64129
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-6.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.jpg
new file mode 100644
index 000000000..107343fd4
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-7.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.jpg
new file mode 100644
index 000000000..28d729e06
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/blue-green-job-list.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.jpg b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.jpg
new file mode 100644
index 000000000..6dbb9ad94
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/blue-green-deployment/version2-deployment.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/cabary-release-3.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/cabary-release-3.jpg
new file mode 100644
index 000000000..34e1d32b2
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/cabary-release-3.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-0.png b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-0.png
new file mode 100644
index 000000000..077d89e1c
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-0.png differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.jpg
new file mode 100644
index 000000000..3f86f7e46
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-4.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.gif b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.gif
new file mode 100644
index 000000000..b61f8c672
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-5.gif differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.jpg
new file mode 100644
index 000000000..76a4f7093
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-6.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.jpg
new file mode 100644
index 000000000..6657aa954
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/canary-release-job.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/create-canary-release.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/create-canary-release.jpg
new file mode 100644
index 000000000..04131f344
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/create-canary-release.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.jpg
new file mode 100644
index 000000000..9db26dec2
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/deployment-list-1.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/job-offline.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/job-offline.jpg
new file mode 100644
index 000000000..04dfded16
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/job-offline.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/set-task-name.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/set-task-name.jpg
new file mode 100644
index 000000000..b77ef2cb3
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/set-task-name.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-traffic.jpg b/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-traffic.jpg
new file mode 100644
index 000000000..860040ce4
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/canary-release/take-over-traffic.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg
new file mode 100644
index 000000000..3366c6b33
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/new-deployment.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg
new file mode 100644
index 000000000..b923b78e5
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/remove-traffic-mirroring.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.jpg
new file mode 100644
index 000000000..df7cc86b8
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroing-task.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-1.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-1.jpg
new file mode 100644
index 000000000..bcb7a9bf8
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-1.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-2.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-2.jpg
new file mode 100644
index 000000000..27a438948
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-2.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-3.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-3.jpg
new file mode 100644
index 000000000..d0014a5c6
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-3.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg
new file mode 100644
index 000000000..52ebe8793
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-4.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-5.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-5.jpg
new file mode 100644
index 000000000..5ef193259
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-5.jpg differ
diff --git a/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg
new file mode 100644
index 000000000..98f0770ef
Binary files /dev/null and b/static/images/docs/project-user-guide/grayscale-release/traffic-mirroring/traffic-mirroring-6.jpg differ
diff --git a/static/images/docs/quickstart/WordPress-1.png b/static/images/docs/quickstart/WordPress-1.png
new file mode 100644
index 000000000..46a788ce7
Binary files /dev/null and b/static/images/docs/quickstart/WordPress-1.png differ
diff --git a/static/images/docs/quickstart/access-method.png b/static/images/docs/quickstart/access-method.png
new file mode 100644
index 000000000..31bc6351d
Binary files /dev/null and b/static/images/docs/quickstart/access-method.png differ
diff --git a/static/images/docs/quickstart/add-mysql-backend-component.png b/static/images/docs/quickstart/add-mysql-backend-component.png
new file mode 100644
index 000000000..a60fc99d3
Binary files /dev/null and b/static/images/docs/quickstart/add-mysql-backend-component.png differ
diff --git a/static/images/docs/quickstart/add-service.png b/static/images/docs/quickstart/add-service.png
new file mode 100644
index 000000000..dedb4081d
Binary files /dev/null and b/static/images/docs/quickstart/add-service.png differ
diff --git a/static/images/docs/quickstart/add-wordPress-frontend-component.png b/static/images/docs/quickstart/add-wordPress-frontend-component.png
new file mode 100644
index 000000000..d870c7679
Binary files /dev/null and b/static/images/docs/quickstart/add-wordPress-frontend-component.png differ
diff --git a/static/images/docs/quickstart/advanced-setting.png b/static/images/docs/quickstart/advanced-setting.png
new file mode 100644
index 000000000..cee445130
Binary files /dev/null and b/static/images/docs/quickstart/advanced-setting.png differ
diff --git a/static/images/docs/quickstart/advanced-settings-wordpress.png b/static/images/docs/quickstart/advanced-settings-wordpress.png
new file mode 100644
index 000000000..eb2eebe0f
Binary files /dev/null and b/static/images/docs/quickstart/advanced-settings-wordpress.png differ
diff --git a/static/images/docs/quickstart/basic-info.png b/static/images/docs/quickstart/basic-info.png
new file mode 100644
index 000000000..5378d464d
Binary files /dev/null and b/static/images/docs/quickstart/basic-info.png differ
diff --git a/static/images/docs/quickstart/choose-existing-volume.png b/static/images/docs/quickstart/choose-existing-volume.png
new file mode 100644
index 000000000..f80f6122c
Binary files /dev/null and b/static/images/docs/quickstart/choose-existing-volume.png differ
diff --git a/static/images/docs/quickstart/choose-existing.png b/static/images/docs/quickstart/choose-existing.png
new file mode 100644
index 000000000..12de7f41b
Binary files /dev/null and b/static/images/docs/quickstart/choose-existing.png differ
diff --git a/static/images/docs/quickstart/clusters-management-zh.png b/static/images/docs/quickstart/clusters-management-zh.png
new file mode 100644
index 000000000..0c4455ff9
Binary files /dev/null and b/static/images/docs/quickstart/clusters-management-zh.png differ
diff --git a/static/images/docs/quickstart/container-image-mysql.png b/static/images/docs/quickstart/container-image-mysql.png
new file mode 100644
index 000000000..79b6ece1f
Binary files /dev/null and b/static/images/docs/quickstart/container-image-mysql.png differ
diff --git a/static/images/docs/quickstart/container-image-wordpress.png b/static/images/docs/quickstart/container-image-wordpress.png
new file mode 100644
index 000000000..e60efb020
Binary files /dev/null and b/static/images/docs/quickstart/container-image-wordpress.png differ
diff --git a/static/images/docs/quickstart/container-image.png b/static/images/docs/quickstart/container-image.png
new file mode 100644
index 000000000..8383ea6bc
Binary files /dev/null and b/static/images/docs/quickstart/container-image.png differ
diff --git a/static/images/docs/quickstart/crds-zh.png b/static/images/docs/quickstart/crds-zh.png
new file mode 100644
index 000000000..5a073e58b
Binary files /dev/null and b/static/images/docs/quickstart/crds-zh.png differ
diff --git a/static/images/docs/quickstart/create-secret.png b/static/images/docs/quickstart/create-secret.png
new file mode 100644
index 000000000..27ad86bec
Binary files /dev/null and b/static/images/docs/quickstart/create-secret.png differ
diff --git a/static/images/docs/quickstart/create-volume.png b/static/images/docs/quickstart/create-volume.png
new file mode 100644
index 000000000..6e266ea0b
Binary files /dev/null and b/static/images/docs/quickstart/create-volume.png differ
diff --git a/static/images/docs/quickstart/create.png b/static/images/docs/quickstart/create.png
new file mode 100644
index 000000000..ad0a0bc45
Binary files /dev/null and b/static/images/docs/quickstart/create.png differ
diff --git a/static/images/docs/quickstart/edit-internet-access.png b/static/images/docs/quickstart/edit-internet-access.png
new file mode 100644
index 000000000..44845f529
Binary files /dev/null and b/static/images/docs/quickstart/edit-internet-access.png differ
diff --git a/static/images/docs/quickstart/edit-ks-installer-zh.png b/static/images/docs/quickstart/edit-ks-installer-zh.png
new file mode 100644
index 000000000..dfe8e521c
Binary files /dev/null and b/static/images/docs/quickstart/edit-ks-installer-zh.png differ
diff --git a/static/images/docs/quickstart/enable-components-zh.png b/static/images/docs/quickstart/enable-components-zh.png
new file mode 100644
index 000000000..7f4902860
Binary files /dev/null and b/static/images/docs/quickstart/enable-components-zh.png differ
diff --git a/static/images/docs/quickstart/environment-var.png b/static/images/docs/quickstart/environment-var.png
new file mode 100644
index 000000000..37c89933d
Binary files /dev/null and b/static/images/docs/quickstart/environment-var.png differ
diff --git a/static/images/docs/quickstart/environment-varss.png b/static/images/docs/quickstart/environment-varss.png
new file mode 100644
index 000000000..8c131d91d
Binary files /dev/null and b/static/images/docs/quickstart/environment-varss.png differ
diff --git a/static/images/docs/quickstart/ingress.png b/static/images/docs/quickstart/ingress.png
new file mode 100644
index 000000000..d048169eb
Binary files /dev/null and b/static/images/docs/quickstart/ingress.png differ
diff --git a/static/images/docs/quickstart/key-value.png b/static/images/docs/quickstart/key-value.png
new file mode 100644
index 000000000..59d607bb9
Binary files /dev/null and b/static/images/docs/quickstart/key-value.png differ
diff --git a/static/images/docs/quickstart/mysql-done.png b/static/images/docs/quickstart/mysql-done.png
new file mode 100644
index 000000000..3c491e5fd
Binary files /dev/null and b/static/images/docs/quickstart/mysql-done.png differ
diff --git a/static/images/docs/quickstart/mysql-name.png b/static/images/docs/quickstart/mysql-name.png
new file mode 100644
index 000000000..12e0c9514
Binary files /dev/null and b/static/images/docs/quickstart/mysql-name.png differ
diff --git a/static/images/docs/quickstart/nodeport-number.png b/static/images/docs/quickstart/nodeport-number.png
new file mode 100644
index 000000000..9f4381308
Binary files /dev/null and b/static/images/docs/quickstart/nodeport-number.png differ
diff --git a/static/images/docs/quickstart/two-components-done.png b/static/images/docs/quickstart/two-components-done.png
new file mode 100644
index 000000000..7e488c031
Binary files /dev/null and b/static/images/docs/quickstart/two-components-done.png differ
diff --git a/static/images/docs/quickstart/volume-settings.png b/static/images/docs/quickstart/volume-settings.png
new file mode 100644
index 000000000..a1e152363
Binary files /dev/null and b/static/images/docs/quickstart/volume-settings.png differ
diff --git a/static/images/docs/quickstart/volume-template-wordpress.png b/static/images/docs/quickstart/volume-template-wordpress.png
new file mode 100644
index 000000000..6304f590c
Binary files /dev/null and b/static/images/docs/quickstart/volume-template-wordpress.png differ
diff --git a/static/images/docs/quickstart/volume-template.png b/static/images/docs/quickstart/volume-template.png
new file mode 100644
index 000000000..248f75cd9
Binary files /dev/null and b/static/images/docs/quickstart/volume-template.png differ
diff --git a/static/images/docs/quickstart/wordpress-deployment.png b/static/images/docs/quickstart/wordpress-deployment.png
new file mode 100644
index 000000000..f82c96bf8
Binary files /dev/null and b/static/images/docs/quickstart/wordpress-deployment.png differ
diff --git a/static/images/docs/quickstart/wordpress-secrets.png b/static/images/docs/quickstart/wordpress-secrets.png
new file mode 100644
index 000000000..4b4775ca5
Binary files /dev/null and b/static/images/docs/quickstart/wordpress-secrets.png differ
diff --git a/static/images/docs/quickstart/wordpress-statefulset.png b/static/images/docs/quickstart/wordpress-statefulset.png
new file mode 100644
index 000000000..10111cd84
Binary files /dev/null and b/static/images/docs/quickstart/wordpress-statefulset.png differ
diff --git a/static/images/docs/quickstart/wordpress.png b/static/images/docs/quickstart/wordpress.png
new file mode 100644
index 000000000..d8134a002
Binary files /dev/null and b/static/images/docs/quickstart/wordpress.png differ
diff --git a/static/images/docs/rabbitmq-app/rabbitMQ04.jpg b/static/images/docs/rabbitmq-app/rabbitMQ04.jpg
deleted file mode 100644
index 28df63d6f..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitMQ04.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq01.jpg b/static/images/docs/rabbitmq-app/rabbitmq01.jpg
deleted file mode 100644
index e7514329f..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq01.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq02.jpg b/static/images/docs/rabbitmq-app/rabbitmq02.jpg
deleted file mode 100644
index 4266dd19e..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq02.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq021.jpg b/static/images/docs/rabbitmq-app/rabbitmq021.jpg
deleted file mode 100644
index fee42ea5a..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq021.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq03.jpg b/static/images/docs/rabbitmq-app/rabbitmq03.jpg
deleted file mode 100644
index 4d8b98ec3..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq03.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq05.jpg b/static/images/docs/rabbitmq-app/rabbitmq05.jpg
deleted file mode 100644
index ad872d6f8..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq05.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq06.jpg b/static/images/docs/rabbitmq-app/rabbitmq06.jpg
deleted file mode 100644
index 47d134f2d..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq06.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq07.jpg b/static/images/docs/rabbitmq-app/rabbitmq07.jpg
deleted file mode 100644
index f75104a66..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq07.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq08.jpg b/static/images/docs/rabbitmq-app/rabbitmq08.jpg
deleted file mode 100644
index 95419a6a8..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq08.jpg and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq09.png b/static/images/docs/rabbitmq-app/rabbitmq09.png
deleted file mode 100644
index 3996fe66a..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq09.png and /dev/null differ
diff --git a/static/images/docs/rabbitmq-app/rabbitmq10.png b/static/images/docs/rabbitmq-app/rabbitmq10.png
deleted file mode 100644
index 69e777cfb..000000000
Binary files a/static/images/docs/rabbitmq-app/rabbitmq10.png and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app01.jpg b/static/images/docs/tomcat-app/tomcat-app01.jpg
deleted file mode 100644
index e7514329f..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app01.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app02.jpg b/static/images/docs/tomcat-app/tomcat-app02.jpg
deleted file mode 100644
index 099b735fc..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app02.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app03.jpg b/static/images/docs/tomcat-app/tomcat-app03.jpg
deleted file mode 100644
index c69836fac..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app03.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app04.jpg b/static/images/docs/tomcat-app/tomcat-app04.jpg
deleted file mode 100644
index 97cc48832..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app04.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app05.jpg b/static/images/docs/tomcat-app/tomcat-app05.jpg
deleted file mode 100644
index 9c3b01077..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app05.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app06.jpg b/static/images/docs/tomcat-app/tomcat-app06.jpg
deleted file mode 100644
index 0349a31e9..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app06.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app07.jpg b/static/images/docs/tomcat-app/tomcat-app07.jpg
deleted file mode 100644
index ab3e4a2c7..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app07.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app08.jpg b/static/images/docs/tomcat-app/tomcat-app08.jpg
deleted file mode 100644
index 96a1ed83c..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app08.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app09.jpg b/static/images/docs/tomcat-app/tomcat-app09.jpg
deleted file mode 100644
index 04f42372d..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app09.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app11.jpg b/static/images/docs/tomcat-app/tomcat-app11.jpg
deleted file mode 100644
index 04f42372d..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app11.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app12.jpg b/static/images/docs/tomcat-app/tomcat-app12.jpg
deleted file mode 100644
index ce74444bb..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app12.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app13.jpg b/static/images/docs/tomcat-app/tomcat-app13.jpg
deleted file mode 100644
index 672ea70c9..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app13.jpg and /dev/null differ
diff --git a/static/images/docs/tomcat-app/tomcat-app14.jpg b/static/images/docs/tomcat-app/tomcat-app14.jpg
deleted file mode 100644
index 001d17e0a..000000000
Binary files a/static/images/docs/tomcat-app/tomcat-app14.jpg and /dev/null differ
diff --git a/static/images/service-mesh/blue-green-7.jpg b/static/images/service-mesh/blue-green-7.jpg
new file mode 100644
index 000000000..fa3222e91
Binary files /dev/null and b/static/images/service-mesh/blue-green-7.jpg differ
diff --git a/static/images/service-mesh/istio-1.jpg b/static/images/service-mesh/istio-1.jpg
new file mode 100644
index 000000000..6d962cfb9
Binary files /dev/null and b/static/images/service-mesh/istio-1.jpg differ
diff --git a/static/images/service-mesh/traffic-mirroing-4.jpg b/static/images/service-mesh/traffic-mirroing-4.jpg
new file mode 100644
index 000000000..b7c169e4f
Binary files /dev/null and b/static/images/service-mesh/traffic-mirroing-4.jpg differ
diff --git a/static/images/service-mesh/traffic-mirroring-6.jpg b/static/images/service-mesh/traffic-mirroring-6.jpg
new file mode 100644
index 000000000..98f0770ef
Binary files /dev/null and b/static/images/service-mesh/traffic-mirroring-6.jpg differ
diff --git a/static/images/service-mesh/traffic-mirroring-7.jpg b/static/images/service-mesh/traffic-mirroring-7.jpg
new file mode 100644
index 000000000..163de1008
Binary files /dev/null and b/static/images/service-mesh/traffic-mirroring-7.jpg differ
diff --git a/static/images/storage/apply-snapshot.png b/static/images/storage/apply-snapshot.png
new file mode 100644
index 000000000..83c72991f
Binary files /dev/null and b/static/images/storage/apply-snapshot.png differ
diff --git a/static/images/storage/attach-volume.png b/static/images/storage/attach-volume.png
new file mode 100644
index 000000000..8c6ca8136
Binary files /dev/null and b/static/images/storage/attach-volume.png differ
diff --git a/static/images/storage/create-snapshot.png b/static/images/storage/create-snapshot.png
new file mode 100644
index 000000000..d1a4ce5bf
Binary files /dev/null and b/static/images/storage/create-snapshot.png differ
diff --git a/static/images/storage/create-storage-class-basic-info.png b/static/images/storage/create-storage-class-basic-info.png
new file mode 100644
index 000000000..426a07fc4
Binary files /dev/null and b/static/images/storage/create-storage-class-basic-info.png differ
diff --git a/static/images/storage/create-storage-class-settings.png b/static/images/storage/create-storage-class-settings.png
new file mode 100644
index 000000000..7b6ed59d5
Binary files /dev/null and b/static/images/storage/create-storage-class-settings.png differ
diff --git a/static/images/storage/create-storage-class-storage-system-custom.png b/static/images/storage/create-storage-class-storage-system-custom.png
new file mode 100644
index 000000000..b7ac5cde0
Binary files /dev/null and b/static/images/storage/create-storage-class-storage-system-custom.png differ
diff --git a/static/images/storage/create-storage-class-storage-system.png b/static/images/storage/create-storage-class-storage-system.png
new file mode 100644
index 000000000..f74a7988f
Binary files /dev/null and b/static/images/storage/create-storage-class-storage-system.png differ
diff --git a/static/images/storage/create-volume.png b/static/images/storage/create-volume.png
new file mode 100644
index 000000000..1ab2deb1e
Binary files /dev/null and b/static/images/storage/create-volume.png differ
diff --git a/static/images/storage/snapshot-list.png b/static/images/storage/snapshot-list.png
new file mode 100644
index 000000000..a3771b04f
Binary files /dev/null and b/static/images/storage/snapshot-list.png differ
diff --git a/static/images/storage/storage-system.png b/static/images/storage/storage-system.png
new file mode 100644
index 000000000..71ec77725
Binary files /dev/null and b/static/images/storage/storage-system.png differ
diff --git a/static/images/storage/volume-features.png b/static/images/storage/volume-features.png
new file mode 100644
index 000000000..ddcb12813
Binary files /dev/null and b/static/images/storage/volume-features.png differ
diff --git a/static/images/storage/volume-monitoring.png b/static/images/storage/volume-monitoring.png
new file mode 100644
index 000000000..32d7c5f72
Binary files /dev/null and b/static/images/storage/volume-monitoring.png differ
diff --git a/static/json/crd.json b/static/json/crd.json
new file mode 100644
index 000000000..3523f1cc1
--- /dev/null
+++ b/static/json/crd.json
@@ -0,0 +1,11401 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "KubeSphere",
+ "contact": {
+ "name": "KubeSphere",
+ "url": "https://kubesphere.io/",
+ "email": "kubesphere@gmail.com"
+ },
+ "license": {
+ "name": "Apache 2.0",
+ "url": "https://www.apache.org/licenses/LICENSE-2.0.html"
+ },
+ "version": "v3.0.0",
+ "x-logo": {
+ "url": "/images/ApiDocs.svg"
+ }
+ },
+ "paths": {
+ "/apis/": {
+ "get": {
+ "description": "get available API versions",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "apis"
+ ],
+ "operationId": "getAPIVersions",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/cluster.kubesphere.io/": {
+ "get": {
+ "description": "get information of a group",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo"
+ ],
+ "operationId": "getClusterKubesphereIoAPIGroup",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+ }
+ }
+ }
+ }
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/": {
+ "get": {
+ "description": "get available resources",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "getClusterKubesphereIoV1alpha1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/clusters": {
+ "get": {
+ "description": "list or watch objects of kind Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listClusterKubesphereIoV1alpha1Cluster",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "post": {
+ "description": "create a Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createClusterKubesphereIoV1alpha1Cluster",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "delete": {
+ "description": "delete collection of Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteClusterKubesphereIoV1alpha1CollectionCluster",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/clusters/{name}": {
+ "get": {
+ "description": "read the specified Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readClusterKubesphereIoV1alpha1Cluster",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "put": {
+ "description": "replace the specified Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceClusterKubesphereIoV1alpha1Cluster",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "delete": {
+ "description": "delete a Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteClusterKubesphereIoV1alpha1Cluster",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified Cluster",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchClusterKubesphereIoV1alpha1Cluster",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Cluster",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/clusters/{name}/status": {
+ "get": {
+ "description": "read status of the specified Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readClusterKubesphereIoV1alpha1ClusterStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified Cluster",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceClusterKubesphereIoV1alpha1ClusterStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified Cluster",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchClusterKubesphereIoV1alpha1ClusterStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Cluster",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/watch/clusters": {
+ "get": {
+ "description": "watch individual changes to a list of Cluster. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchClusterKubesphereIoV1alpha1ClusterList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/cluster.kubesphere.io/v1alpha1/watch/clusters/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind Cluster. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "clusterKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchClusterKubesphereIoV1alpha1Cluster",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "cluster.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Cluster"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Cluster",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/": {
+ "get": {
+ "description": "get information of a group",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo"
+ ],
+ "operationId": "getDevopsKubesphereIoAPIGroup",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+ }
+ }
+ }
+ }
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/": {
+ "get": {
+ "description": "get available resources",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "getDevopsKubesphereIoV1alpha1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibinaries": {
+ "get": {
+ "description": "list or watch objects of kind S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha1S2iBinary",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinaryList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "post": {
+ "description": "create a S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha1S2iBinary",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "delete": {
+ "description": "delete collection of S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1CollectionS2iBinary",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibinaries/{name}": {
+ "get": {
+ "description": "read the specified S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBinary",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "put": {
+ "description": "replace the specified S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBinary",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "delete": {
+ "description": "delete a S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1S2iBinary",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified S2iBinary",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBinary",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBinary",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibinaries/{name}/status": {
+ "get": {
+ "description": "read status of the specified S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBinaryStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified S2iBinary",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBinaryStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified S2iBinary",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBinaryStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBinary",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuilders": {
+ "get": {
+ "description": "list or watch objects of kind S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha1S2iBuilder",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "post": {
+ "description": "create a S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha1S2iBuilder",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "delete": {
+ "description": "delete collection of S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1CollectionS2iBuilder",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuilders/{name}": {
+ "get": {
+ "description": "read the specified S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBuilder",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "put": {
+ "description": "replace the specified S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBuilder",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "delete": {
+ "description": "delete a S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1S2iBuilder",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified S2iBuilder",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBuilder",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilder",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuilders/{name}/status": {
+ "get": {
+ "description": "read status of the specified S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBuilderStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified S2iBuilder",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBuilderStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified S2iBuilder",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBuilderStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilder",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuildertemplates": {
+ "get": {
+ "description": "list or watch objects of kind S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "post": {
+ "description": "create a S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "delete": {
+ "description": "delete collection of S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1CollectionS2iBuilderTemplate",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuildertemplates/{name}": {
+ "get": {
+ "description": "read the specified S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "put": {
+ "description": "replace the specified S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "delete": {
+ "description": "delete a S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified S2iBuilderTemplate",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilderTemplate",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2ibuildertemplates/{name}/status": {
+ "get": {
+ "description": "read status of the specified S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iBuilderTemplateStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified S2iBuilderTemplate",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iBuilderTemplateStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified S2iBuilderTemplate",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iBuilderTemplateStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilderTemplate",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2iruns": {
+ "get": {
+ "description": "list or watch objects of kind S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha1S2iRun",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "post": {
+ "description": "create a S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha1S2iRun",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "delete": {
+ "description": "delete collection of S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1CollectionS2iRun",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2iruns/{name}": {
+ "get": {
+ "description": "read the specified S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iRun",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "put": {
+ "description": "replace the specified S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iRun",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "delete": {
+ "description": "delete a S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha1S2iRun",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified S2iRun",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iRun",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iRun",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/s2iruns/{name}/status": {
+ "get": {
+ "description": "read status of the specified S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha1S2iRunStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified S2iRun",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha1S2iRunStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified S2iRun",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha1S2iRunStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iRun",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibinaries": {
+ "get": {
+ "description": "watch individual changes to a list of S2iBinary. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBinaryList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibinaries/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind S2iBinary. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBinary",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBinary"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBinary",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibuilders": {
+ "get": {
+ "description": "watch individual changes to a list of S2iBuilder. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBuilderList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibuilders/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind S2iBuilder. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBuilder",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilder"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilder",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibuildertemplates": {
+ "get": {
+ "description": "watch individual changes to a list of S2iBuilderTemplate. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBuilderTemplateList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2ibuildertemplates/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind S2iBuilderTemplate. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iBuilderTemplate",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iBuilderTemplate"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iBuilderTemplate",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2iruns": {
+ "get": {
+ "description": "watch individual changes to a list of S2iRun. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iRunList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha1/watch/s2iruns/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind S2iRun. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha1S2iRun",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "S2iRun"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the S2iRun",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/": {
+ "get": {
+ "description": "get available resources",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "getDevopsKubesphereIoV1alpha3APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/devopsprojects": {
+ "get": {
+ "description": "list or watch objects of kind DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha3DevOpsProject",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "post": {
+ "description": "create a DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha3DevOpsProject",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "delete": {
+ "description": "delete collection of DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha3CollectionDevOpsProject",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/devopsprojects/{name}": {
+ "get": {
+ "description": "read the specified DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha3DevOpsProject",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "put": {
+ "description": "replace the specified DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha3DevOpsProject",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "delete": {
+ "description": "delete a DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha3DevOpsProject",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified DevOpsProject",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha3DevOpsProject",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the DevOpsProject",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/devopsprojects/{name}/status": {
+ "get": {
+ "description": "read status of the specified DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha3DevOpsProjectStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified DevOpsProject",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha3DevOpsProjectStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified DevOpsProject",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha3DevOpsProjectStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the DevOpsProject",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/pipelines": {
+ "get": {
+ "description": "list or watch objects of kind Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "listDevopsKubesphereIoV1alpha3Pipeline",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "post": {
+ "description": "create a Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "createDevopsKubesphereIoV1alpha3Pipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "delete": {
+ "description": "delete collection of Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha3CollectionPipeline",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/pipelines/{name}": {
+ "get": {
+ "description": "read the specified Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha3Pipeline",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "put": {
+ "description": "replace the specified Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha3Pipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "delete": {
+ "description": "delete a Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "deleteDevopsKubesphereIoV1alpha3Pipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified Pipeline",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha3Pipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Pipeline",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/pipelines/{name}/status": {
+ "get": {
+ "description": "read status of the specified Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "readDevopsKubesphereIoV1alpha3PipelineStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified Pipeline",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "replaceDevopsKubesphereIoV1alpha3PipelineStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified Pipeline",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "patchDevopsKubesphereIoV1alpha3PipelineStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Pipeline",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/watch/devopsprojects": {
+ "get": {
+ "description": "watch individual changes to a list of DevOpsProject. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha3DevOpsProjectList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/watch/devopsprojects/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind DevOpsProject. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha3DevOpsProject",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "DevOpsProject"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the DevOpsProject",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/watch/pipelines": {
+ "get": {
+ "description": "watch individual changes to a list of Pipeline. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha3PipelineList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/devops.kubesphere.io/v1alpha3/watch/pipelines/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind Pipeline. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "devopsKubesphereIo_v1alpha3"
+ ],
+ "operationId": "watchDevopsKubesphereIoV1alpha3Pipeline",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "devops.kubesphere.io",
+ "version": "v1alpha3",
+ "kind": "Pipeline"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Pipeline",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/network.kubesphere.io/": {
+ "get": {
+ "description": "get information of a group",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo"
+ ],
+ "operationId": "getNetworkKubesphereIoAPIGroup",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+ }
+ }
+ }
+ }
+ },
+ "/apis/network.kubesphere.io/v1alpha1/": {
+ "get": {
+ "description": "get available resources",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "getNetworkKubesphereIoV1alpha1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/network.kubesphere.io/v1alpha1/namespacenetworkpolicies": {
+ "get": {
+ "description": "list or watch objects of kind NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicyList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "post": {
+ "description": "create a NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "delete": {
+ "description": "delete collection of NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteNetworkKubesphereIoV1alpha1CollectionNamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/network.kubesphere.io/v1alpha1/namespacenetworkpolicies/{name}": {
+ "get": {
+ "description": "read the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "put": {
+ "description": "replace the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "delete": {
+ "description": "delete a NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the NamespaceNetworkPolicy",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/network.kubesphere.io/v1alpha1/namespacenetworkpolicies/{name}/status": {
+ "get": {
+ "description": "read status of the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readNetworkKubesphereIoV1alpha1NamespaceNetworkPolicyStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceNetworkKubesphereIoV1alpha1NamespaceNetworkPolicyStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified NamespaceNetworkPolicy",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchNetworkKubesphereIoV1alpha1NamespaceNetworkPolicyStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the NamespaceNetworkPolicy",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/network.kubesphere.io/v1alpha1/watch/namespacenetworkpolicies": {
+ "get": {
+ "description": "watch individual changes to a list of NamespaceNetworkPolicy. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchNetworkKubesphereIoV1alpha1NamespaceNetworkPolicyList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/network.kubesphere.io/v1alpha1/watch/namespacenetworkpolicies/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind NamespaceNetworkPolicy. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "networkKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchNetworkKubesphereIoV1alpha1NamespaceNetworkPolicy",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "network.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "NamespaceNetworkPolicy"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the NamespaceNetworkPolicy",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/tenant.kubesphere.io/": {
+ "get": {
+ "description": "get information of a group",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo"
+ ],
+ "operationId": "getTenantKubesphereIoAPIGroup",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+ }
+ }
+ }
+ }
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/": {
+ "get": {
+ "description": "get available resources",
+ "consumes": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "getTenantKubesphereIoV1alpha1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ }
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/watch/workspaces": {
+ "get": {
+ "description": "watch individual changes to a list of Workspace. deprecated: use the 'watch' parameter with a list operation instead.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchTenantKubesphereIoV1alpha1WorkspaceList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/watch/workspaces/{name}": {
+ "get": {
+ "description": "watch changes to an object of kind Workspace. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "watchTenantKubesphereIoV1alpha1Workspace",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Workspace",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/workspaces": {
+ "get": {
+ "description": "list or watch objects of kind Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf",
+ "application/json;stream=watch",
+ "application/vnd.kubernetes.protobuf;stream=watch"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "listTenantKubesphereIoV1alpha1Workspace",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceList"
+ }
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "post": {
+ "description": "create a Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "createTenantKubesphereIoV1alpha1Workspace",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "delete": {
+ "description": "delete collection of Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteTenantKubesphereIoV1alpha1CollectionWorkspace",
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "name": "allowWatchBookmarks",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "name": "continue",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "name": "fieldSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "name": "labelSelector",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
+ "name": "resourceVersion",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "name": "timeoutSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "name": "watch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/workspaces/{name}": {
+ "get": {
+ "description": "read the specified Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readTenantKubesphereIoV1alpha1Workspace",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "put": {
+ "description": "replace the specified Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceTenantKubesphereIoV1alpha1Workspace",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "delete": {
+ "description": "delete a Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "deleteTenantKubesphereIoV1alpha1Workspace",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "integer",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "name": "gracePeriodSeconds",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "name": "orphanDependents",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "name": "propagationPolicy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "patch": {
+ "description": "partially update the specified Workspace",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchTenantKubesphereIoV1alpha1Workspace",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Workspace",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ },
+ "/apis/tenant.kubesphere.io/v1alpha1/workspaces/{name}/status": {
+ "get": {
+ "description": "read status of the specified Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "readTenantKubesphereIoV1alpha1WorkspaceStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "put": {
+ "description": "replace status of the specified Workspace",
+ "consumes": [
+ "*/*"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "replaceTenantKubesphereIoV1alpha1WorkspaceStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "name": "fieldManager",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ "201": {
+ "description": "Created",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "patch": {
+ "description": "partially update status of the specified Workspace",
+ "consumes": [
+ "application/json-patch+json",
+ "application/merge-patch+json",
+ "application/strategic-merge-patch+json",
+ "application/apply-patch+yaml"
+ ],
+ "produces": [
+ "application/json",
+ "application/yaml",
+ "application/vnd.kubernetes.protobuf"
+ ],
+ "schemes": [
+ "https"
+ ],
+ "tags": [
+ "tenantKubesphereIo_v1alpha1"
+ ],
+ "operationId": "patchTenantKubesphereIoV1alpha1WorkspaceStatus",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "name": "dryRun",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "name": "fieldManager",
+ "in": "query"
+ },
+ {
+ "uniqueItems": true,
+ "type": "boolean",
+ "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "name": "force",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "tenant.kubesphere.io",
+ "version": "v1alpha1",
+ "kind": "Workspace"
+ }
+ },
+ "parameters": [
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "name of the Workspace",
+ "name": "name",
+ "in": "path",
+ "required": true
+ },
+ {
+ "uniqueItems": true,
+ "type": "string",
+ "description": "If 'true', then the output is pretty printed.",
+ "name": "pretty",
+ "in": "query"
+ }
+ ]
+ }
+ },
+ "definitions": {
+ "io.k8s.api.core.v1.LocalObjectReference": {
+ "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.api.networking.v1.IPBlock": {
+ "description": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
+ "type": "object",
+ "required": [
+ "cidr"
+ ],
+ "properties": {
+ "cidr": {
+ "description": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"",
+ "type": "string"
+ },
+ "except": {
+ "description": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "io.k8s.api.networking.v1.NetworkPolicyPort": {
+ "description": "NetworkPolicyPort describes a port to allow traffic on",
+ "type": "object",
+ "properties": {
+ "port": {
+ "description": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"
+ },
+ "protocol": {
+ "description": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": {
+ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
+ "type": "object",
+ "required": [
+ "name",
+ "versions"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "name is the name of the group.",
+ "type": "string"
+ },
+ "preferredVersion": {
+ "description": "preferredVersion is the version preferred by the API server, which probably is the storage version.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
+ },
+ "serverAddressByClientCIDRs": {
+ "description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR"
+ }
+ },
+ "versions": {
+ "description": "versions are the versions supported in this group.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
+ }
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "APIGroup",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList": {
+ "description": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
+ "type": "object",
+ "required": [
+ "groups"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "groups": {
+ "description": "groups is a list of APIGroup.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "APIGroupList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": {
+ "description": "APIResource specifies the name of a resource and whether it is namespaced.",
+ "type": "object",
+ "required": [
+ "name",
+ "singularName",
+ "namespaced",
+ "kind",
+ "verbs"
+ ],
+ "properties": {
+ "categories": {
+ "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "group": {
+ "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
+ "type": "string"
+ },
+ "kind": {
+ "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+ "type": "string"
+ },
+ "name": {
+ "description": "name is the plural name of the resource.",
+ "type": "string"
+ },
+ "namespaced": {
+ "description": "namespaced indicates if a resource is namespaced or not.",
+ "type": "boolean"
+ },
+ "shortNames": {
+ "description": "shortNames is a list of suggested short names of the resource.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "singularName": {
+ "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
+ "type": "string"
+ },
+ "storageVersionHash": {
+ "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
+ "type": "string"
+ },
+ "verbs": {
+ "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "version": {
+ "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": {
+ "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
+ "type": "object",
+ "required": [
+ "groupVersion",
+ "resources"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "groupVersion": {
+ "description": "groupVersion is the group and version this APIResourceList is for.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "resources": {
+ "description": "resources contains the name of the resources and if they are namespaced.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"
+ }
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "APIResourceList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": {
+ "description": "DeleteOptions may be provided when deleting an API object.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "dryRun": {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "gracePeriodSeconds": {
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "orphanDependents": {
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "type": "boolean"
+ },
+ "preconditions": {
+ "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"
+ },
+ "propagationPolicy": {
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "type": "string"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "cluster.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha3"
+ },
+ {
+ "group": "network.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "servicemesh.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha2"
+ },
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha2"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": {
+ "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": {
+ "description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
+ "type": "object",
+ "required": [
+ "groupVersion",
+ "version"
+ ],
+ "properties": {
+ "groupVersion": {
+ "description": "groupVersion specifies the API group and version in the form \"group/version\"",
+ "type": "string"
+ },
+ "version": {
+ "description": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": {
+ "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+ "type": "object",
+ "properties": {
+ "continue": {
+ "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
+ "type": "string"
+ },
+ "remainingItemCount": {
+ "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "resourceVersion": {
+ "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": {
+ "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+ "type": "string"
+ },
+ "fieldsType": {
+ "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
+ "type": "string"
+ },
+ "fieldsV1": {
+ "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"
+ },
+ "manager": {
+ "description": "Manager is an identifier of the workflow managing these fields.",
+ "type": "string"
+ },
+ "operation": {
+ "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+ "type": "string"
+ },
+ "time": {
+ "description": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": {
+ "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ "type": "object",
+ "properties": {
+ "annotations": {
+ "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "clusterName": {
+ "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
+ "type": "string"
+ },
+ "creationTimestamp": {
+ "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "deletionGracePeriodSeconds": {
+ "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "deletionTimestamp": {
+ "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "finalizers": {
+ "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "x-kubernetes-patch-strategy": "merge"
+ },
+ "generateName": {
+ "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ "type": "string"
+ },
+ "generation": {
+ "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "labels": {
+ "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "managedFields": {
+ "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"
+ }
+ },
+ "name": {
+ "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ "type": "string"
+ },
+ "ownerReferences": {
+ "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"
+ },
+ "x-kubernetes-patch-merge-key": "uid",
+ "x-kubernetes-patch-strategy": "merge"
+ },
+ "resourceVersion": {
+ "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": {
+ "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
+ "type": "object",
+ "required": [
+ "apiVersion",
+ "kind",
+ "name",
+ "uid"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "API version of the referent.",
+ "type": "string"
+ },
+ "blockOwnerDeletion": {
+ "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
+ "type": "boolean"
+ },
+ "controller": {
+ "description": "If true, this reference points to the managing controller.",
+ "type": "boolean"
+ },
+ "kind": {
+ "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Patch": {
+ "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": {
+ "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+ "type": "object",
+ "properties": {
+ "resourceVersion": {
+ "description": "Specifies the target ResourceVersion",
+ "type": "string"
+ },
+ "uid": {
+ "description": "Specifies the target UID.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": {
+ "description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
+ "type": "object",
+ "required": [
+ "clientCIDR",
+ "serverAddress"
+ ],
+ "properties": {
+ "clientCIDR": {
+ "description": "The CIDR with which clients can match their IP to figure out the server address that they should use.",
+ "type": "string"
+ },
+ "serverAddress": {
+ "description": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Status": {
+ "description": "Status is a return value for calls that don't return other objects.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "code": {
+ "description": "Suggested HTTP return code for this status, 0 if not set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "details": {
+ "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human-readable description of the status of this operation.",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ },
+ "reason": {
+ "description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "type": "string"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "Status",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": {
+ "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
+ "type": "object",
+ "properties": {
+ "field": {
+ "description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": {
+ "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
+ "type": "object",
+ "properties": {
+ "causes": {
+ "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"
+ }
+ },
+ "group": {
+ "description": "The group attribute of the resource associated with the status StatusReason.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
+ "type": "string"
+ },
+ "retryAfterSeconds": {
+ "description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "uid": {
+ "description": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Time": {
+ "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
+ "type": "string",
+ "format": "date-time"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": {
+ "description": "Event represents a single event to a watched resource.",
+ "type": "object",
+ "required": [
+ "type",
+ "object"
+ ],
+ "properties": {
+ "object": {
+ "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension"
+ },
+ "type": {
+ "type": "string"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "cluster.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha3"
+ },
+ {
+ "group": "network.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "servicemesh.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha2"
+ },
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha2"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.runtime.RawExtension": {
+ "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {
+ "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.",
+ "type": "string",
+ "format": "int-or-string"
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster": {
+ "description": "Cluster is the schema for the clusters API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "cluster.kubesphere.io",
+ "kind": "Cluster",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterCondition": {
+ "type": "object",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "lastUpdateTime": {
+ "description": "The last time this condition was updated.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "message": {
+ "description": "A human readable message indicating details about the transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "The reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of the condition",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterList": {
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "cluster.kubesphere.io",
+ "kind": "ClusterList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterSpec": {
+ "type": "object",
+ "properties": {
+ "connection": {
+ "description": "Connection holds info to connect to the member cluster",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Connection"
+ },
+ "enable": {
+ "description": "Desired state of the cluster",
+ "type": "boolean"
+ },
+ "joinFederation": {
+ "description": "Join cluster as a kubefed cluster",
+ "type": "boolean"
+ },
+ "provider": {
+ "description": "Provider of the cluster, this field is just for description",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterStatus": {
+ "type": "object",
+ "properties": {
+ "conditions": {
+ "description": "Represents the latest available observations of a cluster's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterCondition"
+ }
+ },
+ "configz": {
+ "description": "Configz is status of components enabled in the member cluster. This is synchronized with member cluster every amount of time, like 5 minutes.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "kubernetesVersion": {
+ "description": "GitVersion of the kubernetes cluster, this field is populated by cluster controller",
+ "type": "string"
+ },
+ "nodeCount": {
+ "description": "Count of the kubernetes cluster nodes. This field may not reflect the instant status of the cluster.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "region": {
+ "description": "Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'.",
+ "type": "string"
+ },
+ "zones": {
+ "description": "Zones are the names of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Connection": {
+ "type": "object",
+ "properties": {
+ "kubeconfig": {
+ "description": "KubeConfig content used to connect to cluster api server. Should provide this field explicitly if connection type is direct. Will be populated by ks-proxy if connection type is proxy.",
+ "type": "string",
+ "format": "byte"
+ },
+ "kubernetesAPIEndpoint": {
+ "description": "Kubernetes API Server endpoint. Example: https://10.10.0.1:6443. Should provide this field explicitly if connection type is direct. Will be populated by ks-apiserver if connection type is proxy.",
+ "type": "string"
+ },
+ "kubernetesAPIServerPort": {
+ "description": "KubeAPIServerPort is the port which listens for forwarding kube-apiserver traffic. Only applicable when connection type is proxy.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "kubesphereAPIEndpoint": {
+ "description": "KubeSphere API Server endpoint. Example: http://10.10.0.11:8080. Should provide this field explicitly if connection type is direct. Will be populated by ks-apiserver if connection type is proxy.",
+ "type": "string"
+ },
+ "kubesphereAPIServerPort": {
+ "description": "KubeSphereAPIServerPort is the port which listens for forwarding kubesphere apigateway traffic. Only applicable when connection type is proxy.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "token": {
+ "description": "Token used by agents of member cluster to connect to host cluster proxy. This field is populated by apiserver only if connection type is proxy.",
+ "type": "string"
+ },
+ "type": {
+ "description": "type defines how host cluster will connect to member cluster. ConnectionTypeDirect means direct connection, this requires\n kubeconfig and kubesphere apiserver endpoint provided\nConnectionTypeProxy means using kubesphere proxy, no kubeconfig\n or kubesphere apiserver endpoint required",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig": {
+ "description": "AuthConfig is our abstraction of the Registry authorization information for whatever docker client we happen to be based on",
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "secretRef": {
+ "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference"
+ },
+ "serverAddress": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.CGroupLimits": {
+ "description": "CGroupLimits holds limits used to constrain container resources.",
+ "type": "object",
+ "required": [
+ "memoryLimitBytes",
+ "cpuShares",
+ "cpuPeriod",
+ "cpuQuota",
+ "memorySwap",
+ "parent"
+ ],
+ "properties": {
+ "cpuPeriod": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "cpuQuota": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "cpuShares": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "memoryLimitBytes": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "memorySwap": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "parent": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.ContainerInfo": {
+ "type": "object",
+ "properties": {
+ "buildVolumes": {
+ "description": "BuildVolumes specifies a list of volumes to mount to container running the build.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "builderImage": {
+ "description": "BaseImage are the images this template will use.",
+ "type": "string"
+ },
+ "runtimeArtifacts": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.VolumeSpec"
+ }
+ },
+ "runtimeImage": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.DockerConfig": {
+ "description": "DockerConfig contains the configuration for a Docker connection.",
+ "type": "object",
+ "required": [
+ "endPoint",
+ "certFile",
+ "keyFile",
+ "caFile",
+ "useTLS",
+ "tlsVerify"
+ ],
+ "properties": {
+ "caFile": {
+ "description": "CAFile is the certificate authority file path for a TLS connection",
+ "type": "string"
+ },
+ "certFile": {
+ "description": "CertFile is the certificate file path for a TLS connection",
+ "type": "string"
+ },
+ "endPoint": {
+ "description": "Endpoint is the docker network endpoint or socket",
+ "type": "string"
+ },
+ "keyFile": {
+ "description": "KeyFile is the key file path for a TLS connection",
+ "type": "string"
+ },
+ "tlsVerify": {
+ "description": "TLSVerify indicates if TLS peer must be verified",
+ "type": "boolean"
+ },
+ "useTLS": {
+ "description": "UseTLS indicates if TLS must be used",
+ "type": "boolean"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.EnvironmentSpec": {
+ "description": "EnvironmentSpec specifies a single environment variable.",
+ "type": "object",
+ "required": [
+ "name",
+ "value"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.Parameter": {
+ "type": "object",
+ "properties": {
+ "defaultValue": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "optValues": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "required": {
+ "type": "boolean"
+ },
+ "type": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.ProxyConfig": {
+ "description": "ProxyConfig holds proxy configuration.",
+ "type": "object",
+ "properties": {
+ "httpProxy": {
+ "type": "string"
+ },
+ "httpsProxy": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary": {
+ "description": "S2iBinary is the Schema for the s2ibinaries API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinarySpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinaryStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBinary",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinaryList": {
+ "description": "S2iBinaryList contains a list of S2iBinary",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinary"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBinaryList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinarySpec": {
+ "description": "S2iBinarySpec defines the desired state of S2iBinary",
+ "type": "object",
+ "properties": {
+ "downloadURL": {
+ "description": "DownloadURL in KubeSphere",
+ "type": "string"
+ },
+ "fileName": {
+ "description": "FileName is filename of binary",
+ "type": "string"
+ },
+ "md5": {
+ "description": "MD5 is Binary's MD5 Hash",
+ "type": "string"
+ },
+ "size": {
+ "description": "Size is the size of the file",
+ "type": "string"
+ },
+ "uploadTimeStamp": {
+ "description": "UploadTime is last upload time",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBinaryStatus": {
+ "description": "S2iBinaryStatus defines the observed state of S2iBinary",
+ "type": "object",
+ "properties": {
+ "phase": {
+ "description": "Phase is status of S2iBinary . Possible value is \"Ready\",\"UnableToDownload\"",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuildResult": {
+ "type": "object",
+ "properties": {
+ "commandPull": {
+ "description": "Command for pull image.",
+ "type": "string"
+ },
+ "imageCreated": {
+ "description": "Image created time.",
+ "type": "string"
+ },
+ "imageID": {
+ "description": "Image ID.",
+ "type": "string"
+ },
+ "imageName": {
+ "description": "ImageName is the name of artifact",
+ "type": "string"
+ },
+ "imageRepoTags": {
+ "description": "image tags.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "imageSize": {
+ "description": "The size in bytes of the image",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuildSource": {
+ "type": "object",
+ "properties": {
+ "binaryName": {
+ "description": "Binary file Name",
+ "type": "string"
+ },
+ "binarySize": {
+ "description": "Binary file Size",
+ "type": "integer",
+ "format": "int64"
+ },
+ "builderImage": {
+ "description": "BuilderImage describes which image is used for building the result images.",
+ "type": "string"
+ },
+ "commitID": {
+ "description": "CommitID represents an arbitrary extended object reference in Git as SHA-1",
+ "type": "string"
+ },
+ "committerEmail": {
+ "description": "CommitterEmail contains the e-mail of the committer",
+ "type": "string"
+ },
+ "committerName": {
+ "description": "CommitterName contains the name of the committer",
+ "type": "string"
+ },
+ "description": {
+ "description": "Description is a result image description label. The default is no description.",
+ "type": "string"
+ },
+ "revisionId": {
+ "description": "The RevisionId is a branch name or a SHA-1 hash of every important thing about the commit",
+ "type": "string"
+ },
+ "sourceUrl": {
+ "description": "SourceURL is url of the codes such as https://github.com/a/b.git",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder": {
+ "description": "S2iBuilder is the Schema for the s2ibuilders API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBuilder",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderList": {
+ "description": "S2iBuilderList contains a list of S2iBuilder",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilder"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBuilderList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderSpec": {
+ "description": "S2iBuilderSpec defines the desired state of S2iBuilder",
+ "type": "object",
+ "properties": {
+ "config": {
+ "description": "INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run \"make\" to regenerate code after modifying this file",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iConfig"
+ },
+ "fromTemplate": {
+ "description": "FromTemplate define some inputs from user",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.UserDefineTemplate"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderStatus": {
+ "description": "S2iBuilderStatus defines the observed state of S2iBuilder",
+ "type": "object",
+ "required": [
+ "runCount"
+ ],
+ "properties": {
+ "lastRunName": {
+ "description": "LastRunName return the name of the newest run of this builder",
+ "type": "string"
+ },
+ "lastRunStartTime": {
+ "description": "LastRunStartTime return the startTime of the newest run of this builder",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "lastRunState": {
+ "description": "LastRunState return the state of the newest run of this builder",
+ "type": "string"
+ },
+ "runCount": {
+ "description": "RunCount represent the sum of s2irun of this builder",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate": {
+ "description": "S2iBuilderTemplate is the Schema for the s2ibuildertemplates API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBuilderTemplate",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateList": {
+ "description": "S2iBuilderTemplateList contains a list of S2iBuilderTemplate",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplate"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iBuilderTemplateList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateSpec": {
+ "description": "S2iBuilderTemplateSpec defines the desired state of S2iBuilderTemplate",
+ "type": "object",
+ "properties": {
+ "codeFramework": {
+ "description": "CodeFramework means which language this template is designed for and which framework is using if has framework. Like Java, NodeJS etc",
+ "type": "string"
+ },
+ "containerInfo": {
+ "description": "Images are the images this template will use.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.ContainerInfo"
+ }
+ },
+ "defaultBaseImage": {
+ "description": "DefaultBaseImage is the image that will be used by default",
+ "type": "string"
+ },
+ "description": {
+ "description": "Description illustrate the purpose of this template",
+ "type": "string"
+ },
+ "environment": {
+ "description": "Parameters is a set of environment variables to be passed to the image.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.Parameter"
+ }
+ },
+ "iconPath": {
+ "description": "IconPath is used for frontend display",
+ "type": "string"
+ },
+ "version": {
+ "description": "Version of template",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuilderTemplateStatus": {
+ "description": "S2iBuilderTemplateStatus defines the observed state of S2iBuilderTemplate",
+ "type": "object"
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iConfig": {
+ "type": "object",
+ "required": [
+ "imageName",
+ "sourceUrl"
+ ],
+ "properties": {
+ "addHost": {
+ "description": "AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP, multiple hosts can be added by using multiple --add-host",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "asDockerfile": {
+ "description": "AsDockerfile indicates the path where the Dockerfile should be written instead of building a new image.",
+ "type": "string"
+ },
+ "assembleUser": {
+ "description": "AssembleUser specifies the user to run the assemble script in container",
+ "type": "string"
+ },
+ "blockOnBuild": {
+ "description": "BlockOnBuild prevents s2i from performing a docker build operation if one is necessary to execute ONBUILD commands, or to layer source code into the container for images that don't have a tar binary available, if the image contains ONBUILD commands that would be executed.",
+ "type": "boolean"
+ },
+ "branchExpression": {
+ "description": "Regular expressions, ignoring names that do not match the provided regular expression",
+ "type": "string"
+ },
+ "buildVolumes": {
+ "description": "BuildVolumes specifies a list of volumes to mount to container running the build.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "builderBaseImageVersion": {
+ "description": "BuilderBaseImageVersion provides optional version information about the builder base image.",
+ "type": "string"
+ },
+ "builderImage": {
+ "description": "BuilderImage describes which image is used for building the result images.",
+ "type": "string"
+ },
+ "builderImageVersion": {
+ "description": "BuilderImageVersion provides optional version information about the builder image.",
+ "type": "string"
+ },
+ "builderPullPolicy": {
+ "description": "BuilderPullPolicy specifies when to pull the builder image",
+ "type": "string"
+ },
+ "callbackUrl": {
+ "description": "CallbackURL is a URL which is called upon successful build to inform about that fact.",
+ "type": "string"
+ },
+ "cgroupLimits": {
+ "description": "CGroupLimits describes the cgroups limits that will be applied to any containers run by s2i.",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.CGroupLimits"
+ },
+ "contextDir": {
+ "description": "Specify a relative directory inside the application repository that should be used as a root directory for the application.",
+ "type": "string"
+ },
+ "description": {
+ "description": "Description is a result image description label. The default is no description.",
+ "type": "string"
+ },
+ "destination": {
+ "description": "Destination specifies a location where the untar operation will place its artifacts.",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "DisplayName is a result image display-name label. This defaults to the output image name.",
+ "type": "string"
+ },
+ "dockerConfig": {
+ "description": "DockerConfig describes how to access host docker daemon.",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.DockerConfig"
+ },
+ "dockerNetworkMode": {
+ "description": "DockerNetworkMode is used to set the docker network setting to --net=container:\u003cid\u003e when the builder is invoked from a container.",
+ "type": "string"
+ },
+ "dropCapabilities": {
+ "description": "DropCapabilities contains a list of capabilities to drop when executing containers",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "environment": {
+ "description": "Environment is a map of environment variables to be passed to the image.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.EnvironmentSpec"
+ }
+ },
+ "excludeRegExp": {
+ "description": "ExcludeRegExp contains a string representation of the regular expression desired for deciding which files to exclude from the tar stream",
+ "type": "string"
+ },
+ "export": {
+ "description": "Export Push the result image to specify image registry in tag",
+ "type": "boolean"
+ },
+ "gitSecretRef": {
+ "description": "GitSecretRef is the BasicAuth Secret of Git Clone",
+ "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference"
+ },
+ "hasOnBuild": {
+ "description": "HasOnBuild will be set to true if the builder image contains ONBUILD instructions",
+ "type": "boolean"
+ },
+ "imageName": {
+ "description": "ImageName contains the registry address and repo name, tag should be set by field tag alone",
+ "type": "string"
+ },
+ "imageScriptsUrl": {
+ "description": "ImageScriptsURL is the default location to find the assemble/run scripts for a builder image. This url can be a reference within the builder image if the scheme is specified as image://",
+ "type": "string"
+ },
+ "imageWorkDir": {
+ "description": "ImageWorkDir is the default working directory for the builder image.",
+ "type": "string"
+ },
+ "incremental": {
+ "description": "Incremental describes whether to try to perform incremental build.",
+ "type": "boolean"
+ },
+ "incrementalAuthentication": {
+ "description": "IncrementalAuthentication holds the authentication information for pulling the previous image from private repositories",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig"
+ },
+ "incrementalFromTag": {
+ "description": "IncrementalFromTag sets an alternative image tag to look for existing artifacts. Tag is used by default if this is not set.",
+ "type": "string"
+ },
+ "injections": {
+ "description": "Injections specifies a list source/destination folders that are injected to the container that runs assemble. All files we inject will be truncated after the assemble script finishes.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.VolumeSpec"
+ }
+ },
+ "isBinaryURL": {
+ "description": "IsBinaryURL explain the type of SourceURL. If it is IsBinaryURL, it will download the file directly without using git.",
+ "type": "boolean"
+ },
+ "keepSymlinks": {
+ "description": "KeepSymlinks indicates to copy symlinks as symlinks. Default behavior is to follow symlinks and copy files by content.",
+ "type": "boolean"
+ },
+ "labelNamespace": {
+ "description": "LabelNamespace provides the namespace under which the labels will be generated.",
+ "type": "string"
+ },
+ "labels": {
+ "description": "Labels specify labels and their values to be applied to the resulting image. Label keys must have non-zero length. The labels defined here override generated labels in case they have the same name.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "layeredBuild": {
+ "description": "LayeredBuild describes if this is build which layered scripts and sources on top of BuilderImage.",
+ "type": "boolean"
+ },
+ "nodeAffinityKey": {
+ "description": "The key of Node Affinity.",
+ "type": "string"
+ },
+ "nodeAffinityValues": {
+ "description": "The values of Node Affinity.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "outputBuildResult": {
+ "description": "Whether output build result to status.",
+ "type": "boolean"
+ },
+ "outputImageName": {
+ "description": "OutputImageName is a result image name without tag, default is latest. tag will append to ImageName in the end",
+ "type": "string"
+ },
+ "preserveWorkingDir": {
+ "description": "PreserveWorkingDir describes if working directory should be left after processing.",
+ "type": "boolean"
+ },
+ "previousImagePullPolicy": {
+ "description": "PreviousImagePullPolicy specifies when to pull the previously build image when doing incremental build",
+ "type": "string"
+ },
+ "pullAuthentication": {
+ "description": "PullAuthentication holds the authentication information for pulling the Docker images from private repositories",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig"
+ },
+ "pushAuthentication": {
+ "description": "PushAuthentication holds the authentication information for pushing the Docker images to private repositories",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig"
+ },
+ "removePreviousImage": {
+ "description": "RemovePreviousImage describes if previous image should be removed after successful build. This applies only to incremental builds.",
+ "type": "boolean"
+ },
+ "revisionId": {
+ "description": "The RevisionId is a branch name or a SHA-1 hash of every important thing about the commit",
+ "type": "string"
+ },
+ "runImage": {
+ "description": "RunImage will trigger a \"docker run ...\" invocation of the produced image so the user can see if it operates as he would expect",
+ "type": "boolean"
+ },
+ "runtimeArtifacts": {
+ "description": "RuntimeArtifacts specifies a list of source/destination pairs that will be copied from builder to a runtime image. Source can be a file or directory. Destination must be a directory. Regardless whether it is an absolute or relative path, it will be placed into image's WORKDIR. Destination also can be empty or equals to \".\", in this case it just refers to a root of WORKDIR. In case it's empty, S2I will try to get this list from io.openshift.s2i.assemble-input-files label on a RuntimeImage.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.VolumeSpec"
+ }
+ },
+ "runtimeAuthentication": {
+ "description": "RuntimeAuthentication holds the authentication information for pulling the runtime Docker images from private repositories.",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig"
+ },
+ "runtimeImage": {
+ "description": "RuntimeImage specifies the image that will be a base for resulting image and will be used for running an application. By default, BuilderImage is used for building and running, but the latter may be overridden.",
+ "type": "string"
+ },
+ "runtimeImagePullPolicy": {
+ "description": "RuntimeImagePullPolicy specifies when to pull a runtime image.",
+ "type": "string"
+ },
+ "scriptDownloadProxyConfig": {
+ "description": "ScriptDownloadProxyConfig optionally specifies the http and https proxy to use when downloading scripts",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.ProxyConfig"
+ },
+ "scriptsUrl": {
+ "description": "ScriptsURL is a URL describing where to fetch the S2I scripts from during build process. This url can be a reference within the builder image if the scheme is specified as image://",
+ "type": "string"
+ },
+ "secretCode": {
+ "description": "SecretCode",
+ "type": "string"
+ },
+ "securityOpt": {
+ "description": "SecurityOpt are passed as options to the docker containers launched by s2i.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "sourceUrl": {
+ "description": "SourceURL is url of the codes such as https://github.com/a/b.git",
+ "type": "string"
+ },
+ "tag": {
+ "description": "Tag is a result image tag name.",
+ "type": "string"
+ },
+ "taintKey": {
+ "description": "The name of taint.",
+ "type": "string"
+ },
+ "usage": {
+ "description": "Usage allows for properly shortcircuiting s2i logic when `s2i usage` is invoked",
+ "type": "boolean"
+ },
+ "workingDir": {
+ "description": "WorkingDir describes temporary directory used for downloading sources, scripts and tar operations.",
+ "type": "string"
+ },
+ "workingSourceDir": {
+ "description": "WorkingSourceDir describes the subdirectory off of WorkingDir set up during the repo download that is later used as the root for ignore processing",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun": {
+ "description": "S2iRun is the Schema for the s2iruns API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iRun",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunList": {
+ "description": "S2iRunList contains a list of S2iRun",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRun"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "S2iRunList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunSpec": {
+ "description": "S2iRunSpec defines the desired state of S2iRun",
+ "type": "object",
+ "required": [
+ "builderName"
+ ],
+ "properties": {
+ "backoffLimit": {
+ "description": "BackoffLimit limits the restart count of each s2irun. Default is 0",
+ "type": "integer",
+ "format": "int32"
+ },
+ "builderName": {
+ "description": "BuilderName specify the name of s2ibuilder, required",
+ "type": "string"
+ },
+ "newRevisionId": {
+ "description": "NewRevisionId overrides the default NewRevisionId in its s2ibuilder.",
+ "type": "string"
+ },
+ "newSourceURL": {
+ "description": "NewSourceURL is used to download new binary artifacts",
+ "type": "string"
+ },
+ "newTag": {
+ "description": "NewTag overrides the default tag in its s2ibuilder; the image name cannot be changed.",
+ "type": "string"
+ },
+ "secondsAfterFinished": {
+ "description": "If SecondsAfterFinished is set and greater than zero, and the job created by s2irun becomes successful or failed, the job will be auto-deleted after SecondsAfterFinished seconds",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iRunStatus": {
+ "description": "S2iRunStatus defines the observed state of S2iRun",
+ "type": "object",
+ "properties": {
+ "completionTime": {
+ "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ },
+ "kubernetesJobName": {
+ "description": "KubernetesJobName is the job name in k8s",
+ "type": "string"
+ },
+ "logURL": {
+ "description": "LogURL is used by an external log handler to let the user know where the log is located",
+ "type": "string"
+ },
+ "runState": {
+ "description": "RunState indicates whether this job is done or failed",
+ "type": "string"
+ },
+ "s2iBuildResult": {
+ "description": "S2i build result info.",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuildResult"
+ },
+ "s2iBuildSource": {
+ "description": "S2i build source info.",
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.S2iBuildSource"
+ },
+ "startTime": {
+ "description": "StartTime represent when this run began",
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.UserDefineTemplate": {
+ "type": "object",
+ "properties": {
+ "builderImage": {
+ "description": "BaseImage specify which version of this template to use",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name specify a template to use, so many fields in Config can left empty",
+ "type": "string"
+ },
+ "parameters": {
+ "description": "Parameters must use with `template`, fill some parameters which template will use",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.Parameter"
+ }
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.VolumeSpec": {
+ "description": "VolumeSpec represents a single volume mount point.",
+ "type": "object",
+ "properties": {
+ "destination": {
+ "description": "Destination is the path to mount the volume to - absolute or relative.",
+ "type": "string"
+ },
+ "keep": {
+ "description": "Keep indicates if the mounted data should be kept in the final image.",
+ "type": "boolean"
+ },
+ "source": {
+ "description": "Source is a reference to the volume source.",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.BitbucketServerSource": {
+ "type": "object",
+ "properties": {
+ "api_uri": {
+ "type": "string"
+ },
+ "credential_id": {
+ "type": "string"
+ },
+ "discover_branches": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "discover_pr_from_forks": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscoverPRFromForks"
+ },
+ "discover_pr_from_origin": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "git_clone_option": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitCloneOption"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "regex_filter": {
+ "type": "string"
+ },
+ "repo": {
+ "type": "string"
+ },
+ "scm_id": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject": {
+ "description": "DevOpsProject is the Schema for the devopsprojects API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "DevOpsProject",
+ "version": "v1alpha3"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectList": {
+ "description": "DevOpsProjectList contains a list of DevOpsProject",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProject"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "DevOpsProjectList",
+ "version": "v1alpha3"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectSpec": {
+ "description": "DevOpsProjectSpec defines the desired state of DevOpsProject",
+ "type": "object"
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DevOpsProjectStatus": {
+ "description": "DevOpsProjectStatus defines the observed state of DevOpsProject",
+ "type": "object",
+ "properties": {
+ "adminNamespace": {
+ "description": "INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run \"make\" to regenerate code after modifying this file",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscarderProperty": {
+ "type": "object",
+ "properties": {
+ "days_to_keep": {
+ "type": "string"
+ },
+ "num_to_keep": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscoverPRFromForks": {
+ "type": "object",
+ "properties": {
+ "strategy": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "trust": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitCloneOption": {
+ "type": "object",
+ "properties": {
+ "depth": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "shallow": {
+ "type": "boolean"
+ },
+ "timeout": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitSource": {
+ "type": "object",
+ "properties": {
+ "credential_id": {
+ "type": "string"
+ },
+ "discover_branches": {
+ "type": "boolean"
+ },
+ "git_clone_option": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitCloneOption"
+ },
+ "regex_filter": {
+ "type": "string"
+ },
+ "scm_id": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GithubSource": {
+ "type": "object",
+ "properties": {
+ "api_uri": {
+ "type": "string"
+ },
+ "credential_id": {
+ "type": "string"
+ },
+ "discover_branches": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "discover_pr_from_forks": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscoverPRFromForks"
+ },
+ "discover_pr_from_origin": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "git_clone_option": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitCloneOption"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "regex_filter": {
+ "type": "string"
+ },
+ "repo": {
+ "type": "string"
+ },
+ "scm_id": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.MultiBranchJobTrigger": {
+ "type": "object",
+ "properties": {
+ "create_action_job_to_trigger": {
+ "type": "string"
+ },
+ "delete_action_job_to_trigger": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.MultiBranchPipeline": {
+ "type": "object",
+ "required": [
+ "name",
+ "source_type",
+ "script_path"
+ ],
+ "properties": {
+ "bitbucket_server_source": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.BitbucketServerSource"
+ },
+ "description": {
+ "type": "string"
+ },
+ "discarder": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscarderProperty"
+ },
+ "git_source": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GitSource"
+ },
+ "github_source": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.GithubSource"
+ },
+ "multibranch_job_trigger": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.MultiBranchJobTrigger"
+ },
+ "name": {
+ "type": "string"
+ },
+ "script_path": {
+ "type": "string"
+ },
+ "single_svn_source": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.SingleSvnSource"
+ },
+ "source_type": {
+ "type": "string"
+ },
+ "svn_source": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.SvnSource"
+ },
+ "timer_trigger": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.TimerTrigger"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.NoScmPipeline": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "disable_concurrent": {
+ "type": "boolean"
+ },
+ "discarder": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.DiscarderProperty"
+ },
+ "jenkinsfile": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "parameters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Parameter"
+ }
+ },
+ "remote_trigger": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.RemoteTrigger"
+ },
+ "timer_trigger": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.TimerTrigger"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Parameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "type"
+ ],
+ "properties": {
+ "default_value": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline": {
+ "description": "Pipeline is the Schema for the pipelines API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "Pipeline",
+ "version": "v1alpha3"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineList": {
+ "description": "PipelineList contains a list of Pipeline",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.Pipeline"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "devops.kubesphere.io",
+ "kind": "PipelineList",
+ "version": "v1alpha3"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineSpec": {
+ "description": "PipelineSpec defines the desired state of Pipeline",
+ "type": "object",
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "multi_branch_pipeline": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.MultiBranchPipeline"
+ },
+ "pipeline": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.NoScmPipeline"
+ },
+ "type": {
+ "description": "INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run \"make\" to regenerate code after modifying this file",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.PipelineStatus": {
+ "description": "PipelineStatus defines the observed state of Pipeline",
+ "type": "object"
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.RemoteTrigger": {
+ "type": "object",
+ "properties": {
+ "token": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.SingleSvnSource": {
+ "type": "object",
+ "properties": {
+ "credential_id": {
+ "type": "string"
+ },
+ "remote": {
+ "type": "string"
+ },
+ "scm_id": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.SvnSource": {
+ "type": "object",
+ "properties": {
+ "credential_id": {
+ "type": "string"
+ },
+ "excludes": {
+ "type": "string"
+ },
+ "includes": {
+ "type": "string"
+ },
+ "remote": {
+ "type": "string"
+ },
+ "scm_id": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha3.TimerTrigger": {
+ "type": "object",
+ "properties": {
+ "cron": {
+ "description": "use in no scm job",
+ "type": "string"
+ },
+ "interval": {
+ "description": "use in multi-branch job",
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy": {
+ "description": "NamespaceNetworkPolicy is the Schema for the namespacenetworkpolicies API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicySpec"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "network.kubesphere.io",
+ "kind": "NamespaceNetworkPolicy",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicyList": {
+ "description": "NamespaceNetworkPolicyList contains a list of NamespaceNetworkPolicy",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicy"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "network.kubesphere.io",
+ "kind": "NamespaceNetworkPolicyList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceNetworkPolicySpec": {
+ "description": "NamespaceNetworkPolicySpec provides the specification of a NamespaceNetworkPolicy",
+ "type": "object",
+ "properties": {
+ "egress": {
+ "description": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyEgressRule"
+ }
+ },
+ "ingress": {
+ "description": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyIngressRule"
+ }
+ },
+ "policyTypes": {
+ "description": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceSelector": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyEgressRule": {
+ "description": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8",
+ "type": "object",
+ "properties": {
+ "ports": {
+ "description": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort"
+ }
+ },
+ "to": {
+ "description": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyPeer"
+ }
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyIngressRule": {
+ "description": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.",
+ "type": "object",
+ "properties": {
+ "from": {
+ "description": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyPeer"
+ }
+ },
+ "ports": {
+ "description": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort"
+ }
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NetworkPolicyPeer": {
+ "description": "NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed",
+ "type": "object",
+ "properties": {
+ "ipBlock": {
+ "description": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.",
+ "$ref": "#/definitions/io.k8s.api.networking.v1.IPBlock"
+ },
+ "namespace": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.NamespaceSelector"
+ },
+ "service": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.ServiceSelector"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.network.v1alpha1.ServiceSelector": {
+ "type": "object",
+ "required": [
+ "name",
+ "namespace"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace": {
+ "description": "Workspace is the Schema for the workspaces API",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceStatus"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "Workspace",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceList": {
+ "description": "WorkspaceList contains a list of Workspace",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.Workspace"
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "tenant.kubesphere.io",
+ "kind": "WorkspaceList",
+ "version": "v1alpha1"
+ }
+ ]
+ },
+ "io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceSpec": {
+ "description": "WorkspaceSpec defines the desired state of Workspace",
+ "type": "object",
+ "properties": {
+ "manager": {
+ "type": "string"
+ },
+ "networkIsolation": {
+ "type": "boolean"
+ }
+ }
+ },
+ "io.kubesphere.kubesphere.pkg.apis.tenant.v1alpha1.WorkspaceStatus": {
+ "description": "WorkspaceStatus defines the observed state of Workspace",
+ "type": "object"
+ }
+ }
+}
\ No newline at end of file
diff --git a/static/json/kubesphere.json b/static/json/kubesphere.json
new file mode 100644
index 000000000..9b6b05fa2
--- /dev/null
+++ b/static/json/kubesphere.json
@@ -0,0 +1,23158 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "description": "KubeSphere OpenAPI",
+ "title": "KubeSphere",
+ "contact": {
+ "name": "kubesphere",
+ "url": "https://kubesphere.io",
+ "email": "kubesphere@gmail.com"
+ },
+ "license": {
+ "name": "Apache",
+ "url": "http://www.apache.org/licenses/"
+ },
+ "version": "v3.0.0",
+ "x-logo": {
+ "url": "/images/ApiDocs.svg"
+ },
+ "x-taggroups": [
+ {
+ "name": "IAM",
+ "tags": [
+ "Identity Management",
+ "Access Management"
+ ]
+ },
+ {
+ "name": "Resources",
+ "tags": [
+ "Cluster Resources",
+ "Namespace Resources",
+ "User Resources"
+ ]
+ },
+ {
+ "name": "AppStore",
+ "tags": [
+ "Openpitrix Resources"
+ ]
+ },
+ {
+ "name": "Monitoring",
+ "tags": [
+ "Component Status"
+ ]
+ },
+ {
+ "name": "Tenant",
+ "tags": [
+ "Tenant Resources"
+ ]
+ },
+ {
+ "name": "Other",
+ "tags": [
+ "Verification",
+ "Docker Registry"
+ ]
+ },
+ {
+ "name": "DevOps",
+ "tags": [
+ "DevOps Project",
+ "DevOps Project Credential",
+ "DevOps Pipeline",
+ "DevOps Project Member",
+ "DevOps Webhook",
+ "DevOps Jenkinsfile",
+ "DevOps Scm"
+ ]
+ },
+ {
+ "name": "Monitoring",
+ "tags": [
+ "Cluster Metrics",
+ "Node Metrics",
+ "Namespace Metrics",
+ "Workload Metrics",
+ "Pod Metrics",
+ "Container Metrics",
+ "Workspace Metrics",
+ "Component Metrics"
+ ]
+ },
+ {
+ "name": "Logging",
+ "tags": [
+ "Log Query"
+ ]
+ }
+ ]
+ },
+ "paths": {
+ "/kapis/devops.kubesphere.io/v1alpha2/crumbissuer": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get crumb issuer. A CrumbIssuer represents an algorithm to generate a nonce value, known as a crumb, to counter cross site request forgery exploits. Crumbs are typically hashes incorporating information that uniquely identifies an agent that sends a request, along with a guarded secret so that the crumb value cannot be forged by a third party.",
+ "operationId": "GetCrumb",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Crumb"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Crumb"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/checkCron": {
+ "post": {
+ "produces": [
+ "application/json",
+ "charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Check cron script compile.",
+ "operationId": "CheckCron",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.CronData"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.CheckCronRes"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.CheckCronRes"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/credentials/{credential}/usage": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project Credential"
+ ],
+ "summary": "Get the specified credential usage of the DevOps project",
+ "operationId": "GetProjectCredentialUsage",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "credential's ID, e.g. dockerhub-id",
+ "name": "credential",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Credential"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Credential"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get the specified pipeline of the DevOps project",
+ "operationId": "GetPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Pipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Pipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get all branches in the specified pipeline.",
+ "operationId": "GetPipelineBranch",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "filter=%s",
+ "description": "filter remote scm. e.g. origin",
+ "name": "filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "default": "start=0",
+ "description": "the count of branches start.",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "default": "limit=100",
+ "description": "the count of branches limit.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineBranch"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineBranch"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get the specified branch pipeline of the DevOps project",
+ "operationId": "GetBranchPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of devops project",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.BranchPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.BranchPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Run the specified pipeline of the DevOps project.",
+ "operationId": "RunBranchPipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.RunPayload"
+ }
+ },
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.RunPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.RunPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get details in the specified pipeline activity.",
+ "operationId": "GetBranchPipelineRun",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run id, the unique id for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRun"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRun"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/artifacts": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get all artifacts generated from the specified run of the pipeline branch.",
+ "operationId": "GetBranchArtifacts",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "description": "the limit item count of the search.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+            "description": "The \"Url\" field in the response can be used to download artifacts",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Artifacts"
+ }
+ }
+ },
+ "default": {
+            "description": "The \"Url\" field in the response can be used to download artifacts",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Artifacts"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/log": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get run logs of the specified pipeline activity.",
+ "operationId": "GetBranchRunLog",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "default": "start=0",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/nodes": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get run nodes.",
+ "operationId": "GetBranchPipelineRunNodes",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run id, the unique id for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "default": "limit=10000",
+ "description": "the limit item count of the search.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/nodes/{node}/steps": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get all steps in the specified node.",
+ "operationId": "GetBranchNodeSteps",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of devops project",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node ID, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/nodes/{node}/steps/{step}": {
+ "post": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+        "summary": "(MultiBranchesPipeline) Proceed or Break the paused pipeline which is waiting for user input.",
+ "operationId": "SubmitBranchInputStep",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node ID, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline step ID, the step in pipeline.",
+ "name": "step",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.CheckPlayload"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/nodes/{node}/steps/{step}/log": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get the step logs in the specified pipeline activity.",
+ "operationId": "GetBranchStepLog",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run id, the unique id for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node id, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline step id, the step in pipeline.",
+ "name": "step",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "default": "start=0",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/nodesdetail": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Get steps details in an activity node. For a node, the steps which is defined inside the node.",
+ "operationId": "GetBranchNodesDetail",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/replay": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Replay the specified pipeline of the DevOps project",
+ "operationId": "ReplayBranchPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ReplayPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ReplayPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/branches/{branch}/runs/{run}/stop": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "(MultiBranchesPipeline) Stop the specified pipeline of the DevOps project.",
+ "operationId": "StopBranchPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of branch, same as repository branch.",
+ "name": "branch",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "blocking=%t",
+ "default": "blocking=false",
+ "description": "stop and between each retries will sleep.",
+ "name": "blocking",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "timeOutInSecs=%d",
+ "default": "timeOutInSecs=10",
+ "description": "the time of stop and between each retries sleep.",
+ "name": "timeOutInSecs",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.StopPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.StopPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/checkScriptCompile": {
+ "post": {
+ "consumes": [
+ "application/x-www-form-urlencoded",
+ "charset=utf-8"
+ ],
+ "produces": [
+ "application/json",
+ "charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Check pipeline script compile.",
+ "operationId": "CheckScriptCompile",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "pipeline=%s",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.ReqScript"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.CheckScript"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.CheckScript"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/consolelog": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+        "summary": "Get scan repository logs in the specified pipeline.",
+ "operationId": "GetConsoleLog",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get all runs of the specified pipeline",
+ "operationId": "ListPipelineRuns",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "description": "the item number that the search starts from",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "description": "the limit item count of the search",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "branch=%s",
+ "description": "the name of branch, same as repository branch, will be filtered by branch.",
+ "name": "branch",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRunList"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRunList"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Run pipeline.",
+ "operationId": "RunPipeline",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.RunPayload"
+ }
+ },
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.RunPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.RunPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get details in the specified pipeline activity.",
+ "operationId": "GetPipelineRun",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of devops project",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRun"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineRun"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/artifacts": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get all artifacts in the specified pipeline.",
+ "operationId": "GetArtifacts",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "description": "the limit item count of the search.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+            "description": "The \"Url\" field in the response can be used to download artifacts",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Artifacts"
+ }
+ }
+ },
+ "default": {
+            "description": "The \"Url\" field in the response can be used to download artifacts",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Artifacts"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/log": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get run logs of the specified pipeline activity.",
+ "operationId": "GetRunLog",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "default": "start=0",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/nodes": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get all nodes in the specified activity. node is the stage in the pipeline task",
+ "operationId": "GetPipelineRunNodes",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of devops project",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRunNodes"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRunNodes"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/nodes/{node}/steps": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get all steps in the specified node.",
+ "operationId": "GetNodeSteps",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of devops project",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once build",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node ID, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/nodes/{node}/steps/{step}": {
+ "post": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Proceed or Break the paused pipeline which is waiting for user input.",
+ "operationId": "SubmitInputStep",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.CheckPlayload"
+ }
+ },
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once built.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node ID, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline step ID",
+ "name": "step",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/nodes/{node}/steps/{step}/log": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get pipelines step log.",
+ "operationId": "GetStepLog",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once built.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline node ID, the stage in pipeline.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline step ID, the step in pipeline.",
+ "name": "step",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "default": "start=0",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/nodesdetail": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Get step details inside an activity node, i.e. the steps defined inside the node.",
+ "operationId": "GetNodesDetail",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once built.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/replay": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Replay pipeline",
+ "operationId": "ReplayPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once built.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ReplayPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ReplayPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/runs/{run}/stop": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Stop pipeline",
+ "operationId": "StopPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline run ID, the unique ID for a pipeline once built.",
+ "name": "run",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "blocking=%t",
+ "default": "blocking=false",
+ "description": "whether to stop the pipeline in a blocking manner, sleeping between each retry.",
+ "name": "blocking",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "timeOutInSecs=%d",
+ "default": "timeOutInSecs=10",
+ "description": "the timeout in seconds for the stop operation, including the sleep between each retry.",
+ "name": "timeOutInSecs",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.StopPipeline"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.StopPipeline"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/devops/{devops}/pipelines/{pipeline}/scan": {
+ "post": {
+ "produces": [
+ "text/html; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Scan the remote repository, and start a build if there is a new branch.",
+ "operationId": "ScanBranch",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "DevOps project's ID, e.g. project-RRRRAzLBlLEm",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the CI/CD pipeline",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "delay=%d",
+ "description": "the delay time to scan",
+ "name": "delay",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/jenkins/{path}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "operationId": "func1",
+ "parameters": [
+ {
+ "pattern": "*",
+ "type": "string",
+ "description": "Path stands for any suffix path.",
+ "name": "path",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok"
+ },
+ "default": {
+ "description": "ok"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/scms/{scm}/organizations": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Scm"
+ ],
+ "summary": "List all organizations of the specified source configuration management (SCM) such as Github.",
+ "operationId": "GetSCMOrg",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the ID of the source configuration management (SCM).",
+ "name": "scm",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "credentialId=%s",
+ "description": "credential ID for source configuration management (SCM).",
+ "name": "credentialId",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.SCMOrg"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.SCMOrg"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/scms/{scm}/organizations/{organization}/repositories": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Scm"
+ ],
+ "summary": "List all repositories in the specified organization.",
+ "operationId": "GetOrgRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The ID of the source configuration management (SCM).",
+ "name": "scm",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "organization ID, such as github username.",
+ "name": "organization",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "credentialId=%s",
+ "description": "credential ID for SCM.",
+ "name": "credentialId",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "pageNumber=%d",
+ "description": "page number.",
+ "name": "pageNumber",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "pageSize=%d",
+ "description": "the item count of one page.",
+ "name": "pageSize",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.OrgRepo"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.OrgRepo"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/scms/{scm}/servers": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Scm"
+ ],
+ "summary": "List all servers in Jenkins.",
+ "operationId": "GetSCMServers",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The ID of the source configuration management (SCM).",
+ "name": "scm",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.SCMServer"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.SCMServer"
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Scm"
+ ],
+ "summary": "Create an SCM server in Jenkins.",
+ "operationId": "CreateSCMServers",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The ID of the source configuration management (SCM).",
+ "name": "scm",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.CreateScmServerReq"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.SCMServer"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.SCMServer"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/scms/{scm}/verify": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Scm"
+ ],
+ "summary": "Validate the access token of the specified source configuration management (SCM) such as Github",
+ "operationId": "Validate",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the ID of the source configuration management (SCM).",
+ "name": "scm",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Validates"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.Validates"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/search": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "Search DevOps resource. More info: https://github.com/jenkinsci/blueocean-plugin/tree/master/blueocean-rest#get-pipelines-across-organization",
+ "operationId": "ListPipelines",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "q=%s",
+ "description": "query pipelines, condition for filtering.",
+ "name": "q",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "filter=%s",
+ "description": "Filter some types of jobs. e.g. no-folder,will not get a job of type folder",
+ "name": "filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "start=%d",
+ "description": "the item number that the search starts from.",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d",
+ "description": "the limit item count of the search.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineList"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.PipelineList"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/tojenkinsfile": {
+ "post": {
+ "consumes": [
+ "application/x-www-form-urlencoded"
+ ],
+ "produces": [
+ "application/json",
+ "charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Jenkinsfile"
+ ],
+ "summary": "Convert json to jenkinsfile format.",
+ "operationId": "ToJenkinsfile",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.ReqJson"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ResJenkinsfile"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ResJenkinsfile"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/tojson": {
+ "post": {
+ "consumes": [
+ "application/x-www-form-urlencoded"
+ ],
+ "produces": [
+ "application/json",
+ "charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Jenkinsfile"
+ ],
+ "summary": "Convert jenkinsfile to json format. Usually the frontend uses json to show or edit pipeline",
+ "operationId": "ToJson",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/devops.ReqJenkinsfile"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ResJson"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/devops.ResJson"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/webhook/git": {
+ "get": {
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Webhook"
+ ],
+ "summary": "Get commit notification by HTTP GET method. Git webhook will request here.",
+ "operationId": "GetNotifyCommit",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "url=%s",
+ "description": "Git url",
+ "name": "url",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ },
+ "post": {
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "text/plain; charset=utf-8"
+ ],
+ "tags": [
+ "DevOps Webhook"
+ ],
+ "summary": "Get commit notification by HTTP POST method. Git webhook will request here.",
+ "operationId": "PostNotifyCommit",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "url=%s",
+ "description": "Git url",
+ "name": "url",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha2/webhook/github": {
+ "post": {
+ "consumes": [
+ "application/x-www-form-urlencoded",
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Webhook"
+ ],
+ "summary": "Get commit notification. Github webhook will request here.",
+ "operationId": "GithubWebhook",
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/devops/{devops}/credentials": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "list the credentials of the specified devops for the current user",
+ "operationId": "ListCredential",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name used to do filtering",
+ "name": "name",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "page=%d",
+ "default": "page=1",
+ "description": "page",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "limit",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "ascending=false",
+ "description": "sort parameters, e.g. ascending=false",
+ "name": "ascending",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "sortBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "create the credential of the specified devops for the current user",
+ "operationId": "CreateCredential",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/devops/{devops}/credentials/{credential}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "get the credential of the specified devops for the current user",
+ "operationId": "GetCredential",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "credential name",
+ "name": "credential",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "put the credential of the specified devops for the current user",
+ "operationId": "UpdateCredential",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "credential name",
+ "name": "credential",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "delete the credential of the specified devops for the current user",
+ "operationId": "DeleteCredential",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "credential name",
+ "name": "credential",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Secret"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/devops/{devops}/pipelines": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "list the pipelines of the specified devops for the current user",
+ "operationId": "ListPipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "create the pipeline of the specified devops for the current user",
+ "operationId": "CreatePipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/devops/{devops}/pipelines/{pipeline}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "get the pipeline of the specified devops for the current user",
+ "operationId": "getPipelineByName",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline name",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "put the pipeline of the specified devops for the current user",
+ "operationId": "UpdatePipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline name",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Pipeline"
+ ],
+ "summary": "delete the pipeline of the specified devops for the current user",
+ "operationId": "DeletePipeline",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "pipeline name",
+ "name": "pipeline",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Pipeline"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/workspaces/{workspace}/devops": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "List the devopsproject of the specified workspace for the current user",
+ "operationId": "ListDevOpsProject",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "Create the devopsproject of the specified workspace for the current user",
+ "operationId": "CreateDevOpsProject",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/devops.kubesphere.io/v1alpha3/workspaces/{workspace}/devops/{devops}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "Get the devopsproject of the specified workspace for the current user",
+ "operationId": "GetDevOpsProject",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "Put the devopsproject of the specified workspace for the current user",
+ "operationId": "UpdateDevOpsProject",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "DevOps Project"
+ ],
+ "summary": "Delete the devopsproject of the specified workspace for the current user",
+ "operationId": "DeleteDevOpsProject",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProject"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/clustermembers": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all members in cluster.",
+ "operationId": "ListClusterMembers",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Add user to current cluster.",
+ "operationId": "CreateClusterMembers",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/clustermembers/{clustermember}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve member details in cluster.",
+ "operationId": "DescribeClusterMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster member's username",
+ "name": "clustermember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update cluster member role bind.",
+ "operationId": "UpdateClusterMember",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ {
+ "type": "string",
+ "description": "cluster member's username",
+ "name": "clustermember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete member in cluster scope.",
+ "operationId": "RemoveClusterMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster member's username",
+ "name": "clustermember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/clustermembers/{clustermember}/clusterroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve user's role templates in cluster.",
+ "operationId": "RetrieveMemberRoleTemplates",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster member's username",
+ "name": "clustermember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/clusterroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all cluster roles.",
+ "operationId": "ListClusterRoles",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create cluster role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "CreateClusterRole",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/clusterroles/{clusterrole}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve cluster role details.",
+ "operationId": "DescribeClusterRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster role name",
+ "name": "clusterrole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update cluster role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "UpdateClusterRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster role name",
+ "name": "clusterrole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete cluster role.",
+ "operationId": "DeleteClusterRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster role name",
+ "name": "clusterrole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Patch cluster role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "PatchClusterRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster role name",
+ "name": "clusterrole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ClusterRole"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/devops/{devops}/members": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all members in the specified devops project.",
+ "operationId": "ListNamespaceMembers",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Batch add devops project members.",
+ "operationId": "CreateNamespaceMembers",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/devops/{devops}/members/{member}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve devops project member details.",
+ "operationId": "DescribeNamespaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "devops project member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update member in devops project.",
+ "operationId": "UpdateNamespaceMember",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "devops project member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Remove member in namespace.",
+ "operationId": "RemoveNamespaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "devops project member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/devops/{devops}/members/{member}/roles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve member's role templates in devops project.",
+ "operationId": "RetrieveMemberRoleTemplates",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "devops project member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/devops/{devops}/roles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all roles in the specified devops project.",
+ "operationId": "ListRoles",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create role in the specified devops project. Automatically aggregate policy rules according to annotation.",
+ "operationId": "CreateNamespaceRole",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/devops/{devops}/roles/{role}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve devops project role details.",
+ "operationId": "DescribeNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update devops project role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "UpdateNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete role in the specified devops project.",
+ "operationId": "DeleteNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Patch devops project role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "PatchNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "devops project name",
+ "name": "devops",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/globalroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all global roles.",
+ "operationId": "ListGlobalRoles",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create global role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "CreateGlobalRole",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/globalroles/{globalrole}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve global role details.",
+ "operationId": "DescribeGlobalRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "global role name",
+ "name": "globalrole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update global role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "UpdateGlobalRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "global role name",
+ "name": "globalrole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete global role.",
+ "operationId": "DeleteGlobalRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "global role name",
+ "name": "globalrole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Patch global role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "PatchGlobalRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "global role name",
+ "name": "globalrole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.GlobalRole"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/namespaces/{namespace}/members": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all members in the specified namespace.",
+ "operationId": "ListNamespaceMembers",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Batch add namespace members.",
+ "operationId": "CreateNamespaceMembers",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/namespaces/{namespace}/members/{member}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve namespace member details.",
+ "operationId": "DescribeNamespaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update member in namespace.",
+ "operationId": "UpdateNamespaceMember",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete member in namespace scope.",
+ "operationId": "RemoveNamespaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/namespaces/{namespace}/members/{member}/roles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve member's role templates in namespace.",
+ "operationId": "RetrieveMemberRoleTemplates",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace member's username",
+ "name": "member",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/namespaces/{namespace}/roles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all roles in the specified namespace.",
+ "operationId": "ListRoles",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create role in the specified namespace. Automatically aggregate policy rules according to annotation.",
+ "operationId": "CreateNamespaceRole",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/namespaces/{namespace}/roles/{role}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve role details.",
+ "operationId": "DescribeNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update namespace role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "UpdateNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete role in the specified namespace.",
+ "operationId": "DeleteNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Patch namespace role.",
+ "operationId": "PatchNamespaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "role name",
+ "name": "role",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Role"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/users": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all users in global scope.",
+ "operationId": "ListUsers",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create user in global scope.",
+ "operationId": "CreateUser",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/users/{user}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve user details.",
+ "operationId": "DescribeUser",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update user info.",
+ "operationId": "UpdateUser",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete user.",
+ "operationId": "DeleteUser",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/users/{user}/globalroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve user's global role templates.",
+ "operationId": "RetrieveMemberRoleTemplates",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/users/{user}/loginrecords": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List user's login records.",
+ "operationId": "ListUserLoginRecords",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username of the user",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/users/{user}/password": {
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Modify user's password.",
+ "operationId": "ModifyPassword",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/iam.PasswordReset"
+ }
+ },
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/workspaces/{workspace}/workspacemembers": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all members in the specified workspace.",
+ "operationId": "ListWorkspaceMembers",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Batch add workspace members.",
+ "operationId": "CreateWorkspaceMembers",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/workspaces/{workspace}/workspacemembers/{workspacemember}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve workspace member details.",
+ "operationId": "DescribeWorkspaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace member's username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.User"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update member in workspace.",
+ "operationId": "UpdateWorkspaceMember",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace member's username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.Member"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete member in workspace scope.",
+ "operationId": "RemoveWorkspaceMember",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace member's username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/workspaces/{workspace}/workspacemembers/{workspacemember}/workspaceroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve member's role templates in workspace.",
+ "operationId": "RetrieveMemberRoleTemplates",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace member's username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/workspaces/{workspace}/workspaceroles": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "List all workspace roles.",
+ "operationId": "ListWorkspaceRoles",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Create workspace role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "CreateWorkspaceRole",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ },
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/iam.kubesphere.io/v1alpha2/workspaces/{workspace}/workspaceroles/{workspacerole}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Retrieve workspace role details.",
+ "operationId": "DescribeWorkspaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace role name",
+ "name": "workspacerole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Update workspace role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "UpdateWorkspaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace role name",
+ "name": "workspacerole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Delete workspace role.",
+ "operationId": "DeleteWorkspaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace role name",
+ "name": "workspacerole",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Access Management"
+ ],
+ "summary": "Patch workspace role. Automatically aggregate policy rules according to annotation.",
+ "operationId": "PatchWorkspaceRole",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspace role name",
+ "name": "workspacerole",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceRole"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/cluster": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Cluster Metrics"
+ ],
+ "summary": "Get cluster-level metric data.",
+ "operationId": "handleClusterMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both cluster CPU usage and disk usage: `cluster_cpu_usage|cluster_disk_size_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/components/{component}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Metrics"
+ ],
+ "summary": "Get component-level metric data of the specific system component.",
+ "operationId": "handleComponentMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "system component to monitor. One of etcd, apiserver, scheduler.",
+ "name": "component",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both etcd server list and total size of the underlying database: `etcd_server_list|etcd_mvcc_db_size`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/kubesphere": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "KubeSphere Metrics"
+ ],
+ "summary": "Get platform-level metric data.",
+ "operationId": "handleKubeSphereMetricsQuery",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Metrics"
+ ],
+ "summary": "Get namespace-level metric data of all namespaces.",
+ "operationId": "handleNamespaceMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both namespace CPU usage and memory usage: `namespace_cpu_usage|namespace_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The namespace filter consists of a regexp pattern. It specifies which namespace data to return. For example, the following filter matches both namespace test and kube-system: `test|kube-system`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort namespaces by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Metrics"
+ ],
+ "summary": "Get namespace-level metric data of the specific namespace.",
+ "operationId": "handleNamespaceMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both namespace CPU usage and memory usage: `namespace_cpu_usage|namespace_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/persistentvolumeclaims": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "PVC Metrics"
+ ],
+ "summary": "Get PVC-level metric data of the specific namespace's PVCs.",
+ "operationId": "handlePVCMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both PVC available and used inodes: `pvc_inodes_available|pvc_inodes_used`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The PVC filter consists of a regexp pattern. It specifies which PVC data to return. For example, the following filter matches any PVC whose name begins with redis: `redis.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort PVCs by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/persistentvolumeclaims/{pvc}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "PVC Metrics"
+ ],
+ "summary": "Get PVC-level metric data of a specific PVC. Navigate to the PVC by the PVC's namespace.",
+ "operationId": "handlePVCMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "PVC name.",
+ "name": "pvc",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both PVC available and used inodes: `pvc_inodes_available|pvc_inodes_used`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/pods": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Pod Metrics"
+ ],
+ "summary": "Get pod-level metric data of the specific namespace's pods.",
+ "operationId": "handlePodMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both pod CPU usage and memory usage: `pod_cpu_usage|pod_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The pod filter consists of a regexp pattern. It specifies which pod data to return. For example, the following filter matches any pod whose name begins with redis: `redis.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort pods by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/pods/{pod}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Pod Metrics"
+ ],
+ "summary": "Get pod-level metric data of a specific pod. Navigate to the pod by the pod's namespace.",
+ "operationId": "handlePodMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Pod name.",
+ "name": "pod",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both pod CPU usage and memory usage: `pod_cpu_usage|pod_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/pods/{pod}/containers": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Container Metrics"
+ ],
+ "summary": "Get container-level metric data of a specific pod's containers. Navigate to the pod by the pod's namespace.",
+ "operationId": "handleContainerMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Pod name.",
+ "name": "pod",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both container CPU usage and memory usage: `container_cpu_usage|container_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The container filter consists of a regexp pattern. It specifies which container data to return. For example, the following filter matches container prometheus and prometheus-config-reloader: `prometheus|prometheus-config-reloader`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort containers by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/pods/{pod}/containers/{container}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Container Metrics"
+ ],
+ "summary": "Get container-level metric data of a specific container. Navigate to the container by the pod name and the namespace.",
+ "operationId": "handleContainerMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Pod name.",
+ "name": "pod",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Container name.",
+ "name": "container",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both container CPU usage and memory usage: `container_cpu_usage|container_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/targets/labelsets": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Custom Metrics"
+ ],
+ "summary": "List all available labels and values of a metric within a specific time span.",
+ "operationId": "handleMetricLabelSetQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The name of the metric",
+ "name": "metric",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "End time of query. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.MetricLabelSet"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.MetricLabelSet"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/targets/metadata": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Custom Metrics"
+ ],
+ "summary": "Get metadata of metrics for the specific namespace.",
+ "operationId": "handleMetadataQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metadata"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metadata"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/targets/query": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Custom Metrics"
+ ],
+ "summary": "Make an ad-hoc query in the specific namespace.",
+ "operationId": "handleAdhocQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The expression to be evaluated.",
+ "name": "expr",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metric"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metric"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/workloads": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Workload Metrics"
+ ],
+ "summary": "Get workload-level metric data of a specific namespace's workloads.",
+ "operationId": "handleWorkloadMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both workload CPU usage and memory usage: `workload_cpu_usage|workload_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The workload filter consists of a regexp pattern. It specifies which workload data to return. For example, the following filter matches any workload whose name begins with prometheus: `prometheus.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort workloads by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/workloads/{kind}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Workload Metrics"
+ ],
+ "summary": "Get workload-level metric data of all workloads which belong to a specific kind.",
+ "operationId": "handleWorkloadMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Workload kind. One of deployment, daemonset, statefulset.",
+ "name": "kind",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both workload CPU usage and memory usage: `workload_cpu_usage|workload_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The workload filter consists of a regexp pattern. It specifies which workload data to return. For example, the following filter matches any workload whose name begins with prometheus: `prometheus.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort workloads by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/namespaces/{namespace}/workloads/{kind}/{workload}/pods": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Pod Metrics"
+ ],
+ "summary": "Get pod-level metric data of a specific workload's pods. Navigate to the workload by the namespace.",
+ "operationId": "handlePodMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the namespace.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Workload kind. One of deployment, daemonset, statefulset.",
+ "name": "kind",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Workload name.",
+ "name": "workload",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both pod CPU usage and memory usage: `pod_cpu_usage|pod_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The pod filter consists of a regexp pattern. It specifies which pod data to return. For example, the following filter matches any pod whose name begins with redis: `redis.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort pods by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/nodes": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Node Metrics"
+ ],
+ "summary": "Get node-level metric data of all nodes.",
+ "operationId": "handleNodeMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both node CPU usage and disk usage: `node_cpu_usage|node_disk_size_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The node filter consists of a regexp pattern. It specifies which node data to return. For example, the following filter matches both node i-caojnter and i-cmu82ogj: `i-caojnter|i-cmu82ogj`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort nodes by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/nodes/{node}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Node Metrics"
+ ],
+ "summary": "Get node-level metric data of the specific node.",
+ "operationId": "handleNodeMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node name.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both node CPU usage and disk usage: `node_cpu_usage|node_disk_size_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/nodes/{node}/pods": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Pod Metrics"
+ ],
+ "summary": "Get pod-level metric data of all pods on a specific node.",
+ "operationId": "handlePodMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node name.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both pod CPU usage and memory usage: `pod_cpu_usage|pod_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The pod filter consists of a regexp pattern. It specifies which pod data to return. For example, the following filter matches any pod whose name begins with redis: `redis.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort pods by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/nodes/{node}/pods/{pod}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Pod Metrics"
+ ],
+ "summary": "Get pod-level metric data of a specific pod. Navigate to the pod by the node where it is scheduled.",
+ "operationId": "handlePodMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Node name.",
+ "name": "node",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Pod name.",
+ "name": "pod",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both pod CPU usage and memory usage: `pod_cpu_usage|pod_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/storageclasses/{storageclass}/persistentvolumeclaims": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "PVC Metrics"
+ ],
+ "summary": "Get PVC-level metric data of the specific storageclass's PVCs.",
+ "operationId": "handlePVCMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The name of the storageclass.",
+ "name": "storageclass",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both PVC available and used inodes: `pvc_inodes_available|pvc_inodes_used`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The PVC filter consists of a regexp pattern. It specifies which PVC data to return. For example, the following filter matches any pod whose name begins with redis: `redis.*`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort PVCs by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/workspaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Workspace Metrics"
+ ],
+ "summary": "Get workspace-level metric data of all workspaces.",
+ "operationId": "handleWorkspaceMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both workspace CPU usage and memory usage: `workspace_cpu_usage|workspace_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The workspace filter consists of a regexp pattern. It specifies which workspace data to return.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort workspaces by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/workspaces/{workspace}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Workspace Metrics"
+ ],
+ "summary": "Get workspace-level metric data of a specific workspace.",
+ "operationId": "handleWorkspaceMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Workspace name.",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both workspace CPU usage and memory usage: `workspace_cpu_usage|workspace_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+          "description": "Additional operations. The currently available type is statistics. It retrieves the total number of namespaces, devops projects, members and roles in this workspace at the moment.",
+ "name": "type",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/monitoring.kubesphere.io/v1alpha3/workspaces/{workspace}/namespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Metrics"
+ ],
+ "summary": "Get namespace-level metric data of a specific workspace.",
+ "operationId": "handleNamespaceMetricsQuery",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Workspace name.",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "The metric name filter consists of a regexp pattern. It specifies which metric data to return. For example, the following filter matches both namespace CPU usage and memory usage: `namespace_cpu_usage|namespace_memory_usage`. View available metrics at [kubesphere.io](https://docs.kubesphere.io/advanced-v2.0/zh-CN/api-reference/monitoring-metrics/).",
+ "name": "metrics_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "The namespace filter consists of a regexp pattern. It specifies which namespace data to return. For example, the following filter matches both namespace test and kube-system: `test|kube-system`.",
+ "name": "resources_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1559347200. ",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Use **start** and **end** to retrieve metric data over a time span. It is a string with Unix time format, eg. 1561939200. ",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.",
+ "name": "time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Sort namespaces by the specified metric. Not applicable if **start** and **end** are provided.",
+ "name": "sort_metric",
+ "in": "query"
+ },
+ {
+ "type": "string",
+          "default": "desc",
+ "description": "Sort order. One of asc, desc.",
+ "name": "sort_type",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "description": "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 5,
+ "description": "Page size, the maximum number of results in a single page. Defaults to 5.",
+ "name": "limit",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/monitoring.Metrics"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/network.kubesphere.io/v1alpha2/namespaces/{namespace}/topology": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Network Topology"
+ ],
+ "summary": "Get the topology with specifying a namespace",
+ "operationId": "getNamespaceTopology",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.TopologyResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.TopologyResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/network.kubesphere.io/v1alpha2/namespaces/{namespace}/topology/{node_id}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Network Topology"
+ ],
+ "summary": "Get the topology with specifying a node id in the whole topology and specifying a namespace",
+ "operationId": "getNamespaceNodeTopology",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "id of the node in the whole topology",
+ "name": "node_id",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.NodeResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.NodeResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/applications": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List all applications",
+ "operationId": "ListApplications",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=value,key~value",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List app templates",
+ "operationId": "ListApps",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create a new app template",
+ "operationId": "CreateApp",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified app template",
+ "operationId": "DescribeApp",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified app template",
+ "operationId": "DeleteApp",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified app template",
+ "operationId": "ModifyApp",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Perform recover or suspend operation on app",
+ "operationId": "DoAppAction",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/audits": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List audits information of the specific app template",
+ "operationId": "ListAppVersionAudits",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get active versions of app, can filter with these fields(version_id, app_id, name, owner, description, package_name, status, type), default return all active app versions",
+ "operationId": "ListAppVersions",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create a new app template version",
+ "operationId": "CreateAppVersion",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "Validate format of package(pack by op tool)",
+ "name": "validate",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions/{version}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified app template version",
+ "operationId": "DescribeAppVersion",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified app template version",
+ "operationId": "DeleteAppVersion",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified app template version",
+ "operationId": "ModifyAppVersion",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions/{version}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Perform submit or other operations on app",
+ "operationId": "DoAppVersionAction",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions/{version}/audits": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List audits information of version-specific app template",
+ "operationId": "ListAppVersionAudits",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions/{version}/files": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get app template package files",
+ "operationId": "GetAppVersionFiles",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.GetAppVersionPackageFilesResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.GetAppVersionPackageFilesResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/apps/{app}/versions/{version}/package": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get packages of version-specific app",
+ "operationId": "GetAppVersionPackage",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.GetAppVersionPackageResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.GetAppVersionPackageResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/attachments/{attachment}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get attachment by attachment id",
+ "operationId": "DescribeAttachment",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "attachment id",
+ "name": "attachment",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Attachment"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Attachment"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/categories": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List categories",
+ "operationId": "ListCategories",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create app template category",
+ "operationId": "CreateCategory",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateCategoryRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateCategoryResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateCategoryResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/categories/{category}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified category",
+ "operationId": "DescribeCategory",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "category id",
+ "name": "category",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Category"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Category"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified category",
+ "operationId": "DeleteCategory",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "category id",
+ "name": "category",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified category",
+ "operationId": "ModifyCategory",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyCategoryRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "category id",
+ "name": "category",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/repos": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List repositories in the specified workspace",
+ "operationId": "ListRepos",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create repository in the specified workspace, repository used to store package of app",
+ "operationId": "CreateRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Validate repository",
+ "name": "validate",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/repos/{repo}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified repository in the specified workspace",
+ "operationId": "DescribeRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Repo"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Repo"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified repository in the specified workspace",
+ "operationId": "DeleteRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified repository in the specified workspace",
+ "operationId": "ModifyRepo",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyRepoRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/repos/{repo}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Start index repository event",
+ "operationId": "DoRepoAction",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.RepoActionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/repos/{repo}/events": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get repository events",
+ "operationId": "ListRepoEvents",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/reviews": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get reviews of version-specific app",
+ "operationId": "ListReviews",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionReview"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionReview"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List app templates in the specified workspace.",
+ "operationId": "ListApps",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+          "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create a new app template",
+ "operationId": "CreateApp",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified app template",
+ "operationId": "DescribeApp",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified app template",
+ "operationId": "DeleteApp",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified app template",
+ "operationId": "ModifyApp",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Perform recover or suspend operation on app",
+ "operationId": "DoAppAction",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}/versions": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get active versions of app, can filter with these fields(version_id, app_id, name, owner, description, package_name, status, type), default return all active app versions",
+ "operationId": "ListAppVersions",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+ "description": "query conditions,connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create a new app template version",
+ "operationId": "CreateAppVersion",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "Validate format of package(pack by op tool)",
+ "name": "validate",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateAppVersionResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}/versions/{version}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified app template version",
+ "operationId": "DescribeAppVersion",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified app template version",
+ "operationId": "DeleteAppVersion",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified app template version",
+ "operationId": "ModifyAppVersion",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyAppVersionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}/versions/{version}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Perform submit or other operations on app",
+ "operationId": "DoAppVersionAction",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/apps/{app}/versions/{version}/audits": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List audits information of version-specific app template",
+ "operationId": "ListAppVersionAudits",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "app template version id",
+ "name": "version",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app template id",
+ "name": "app",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.AppVersionAudit"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/clusters/{cluster}/applications": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List all applications in special cluster",
+ "operationId": "ListApplications",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=value,key~value",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/clusters/{cluster}/namespaces/{namespace}/applications": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List all applications within the specified namespace",
+ "operationId": "ListApplications",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=value,key~value",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Deploy a new application",
+ "operationId": "CreateApplication",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateClusterRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/clusters/{cluster}/namespaces/{namespace}/applications/{application}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified application of the namespace",
+ "operationId": "DescribeApplication",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the id of the application",
+ "name": "application",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Application"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Application"
+ }
+ }
+ }
+ },
+ "post": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Upgrade application",
+ "operationId": "UpgradeApplication",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.UpgradeClusterRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the id of the application",
+ "name": "application",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified application",
+ "operationId": "DeleteApplication",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the id of the application",
+ "name": "application",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Modify application",
+ "operationId": "ModifyApplication",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyClusterAttributesRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "the name of the cluster.",
+ "name": "cluster",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the id of the application",
+ "name": "application",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/namespaces/{namespace}/applications": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List all applications within the specified namespace",
+ "operationId": "ListApplications",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=value,key~value",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "the name of the project.",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/namespaces/{namespace}/applications/{application}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified application of the namespace",
+ "operationId": "DescribeApplication",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the id of the application",
+ "name": "application",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Application"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Application"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/repos": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "List repositories in the specified workspace",
+ "operationId": "ListRepos",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+ "description": "query conditions,connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Create repository in the specified workspace, repository used to store package of app",
+ "operationId": "CreateRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Validate repository",
+ "name": "validate",
+ "in": "query"
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.CreateRepoResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/repos/{repo}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Describe the specified repository in the specified workspace",
+ "operationId": "DescribeRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Repo"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/openpitrix.Repo"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Delete the specified repository in the specified workspace",
+ "operationId": "DeleteRepo",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Patch the specified repository in the specified workspace",
+ "operationId": "ModifyRepo",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.ModifyRepoRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/repos/{repo}/action": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Start index repository event",
+ "operationId": "DoRepoAction",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/openpitrix.RepoActionRequest"
+ }
+ },
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/openpitrix.io/v1/workspaces/{workspace}/repos/{repo}/events": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Openpitrix Resources"
+ ],
+ "summary": "Get repository events",
+ "operationId": "ListRepoEvents",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "repo id",
+ "name": "repo",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/operations.kubesphere.io/v1alpha2/namespaces/{namespace}/jobs/{job}": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "summary": "Rerun job whether the job is complete or not",
+ "operationId": "handleJobReRun",
+ "deprecated": true,
+ "parameters": [
+ {
+ "type": "string",
+ "description": "job name",
+ "name": "job",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the namespace where the job runs in",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "action must be \"rerun\"",
+ "name": "action",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "version of job, rerun when the version matches",
+ "name": "resourceVersion",
+ "in": "query",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/abnormalworkloads": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Cluster Resources"
+ ],
+ "summary": "get abnormal workloads' count of whole cluster",
+ "operationId": "handleGetNamespacedAbnormalWorkloads",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.Workloads"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.Workloads"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/componenthealth": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "Get the health status of system components.",
+ "operationId": "handleGetSystemHealthStatus",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.HealthStatus"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.HealthStatus"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/components": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "List the system components.",
+ "operationId": "handleGetComponents",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/components/{component}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "Describe the specified system component.",
+ "operationId": "handleGetComponentStatus",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "component name",
+ "name": "component",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/git/verify": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Verification"
+ ],
+ "summary": "Verify if the kubernetes secret has read access to the git repository",
+ "operationId": "handleVerifyGitCredential",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/git.AuthInfo"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/abnormalworkloads": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "get abnormal workloads' count of specified namespace",
+ "operationId": "handleGetNamespacedAbnormalWorkloads",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.Workloads"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.Workloads"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/daemonsets/{daemonset}/revisions/{revision}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Get the specified daemonset revision",
+ "operationId": "handleGetDaemonSetRevision",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the daemonset",
+ "name": "daemonset",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the namespace of the daemonset",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the revision of the daemonset",
+ "name": "revision",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.DaemonSet"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.DaemonSet"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/deployments/{deployment}/revisions/{revision}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Get the specified deployment revision",
+ "operationId": "handleGetDeploymentRevision",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of deployment",
+ "name": "deployment",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the namespace of the deployment",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the revision of the deployment",
+ "name": "revision",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ReplicaSet"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.ReplicaSet"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/quotas": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "get specified namespace's resource quota and usage",
+ "operationId": "handleGetNamespaceQuotas",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ResourceQuota"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ResourceQuota"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/router": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "List router of a specified project",
+ "operationId": "handleGetRouter",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Update a router for a specified project",
+ "operationId": "handleUpdateRouter",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Create a router for a specified project",
+ "operationId": "handleCreateRouter",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Delete router of a specified project",
+ "operationId": "handleDeleteRouter",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/statefulsets/{statefulset}/revisions/{revision}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Get the specified statefulset revision",
+ "operationId": "handleGetStatefulSetRevision",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the statefulset",
+ "name": "statefulset",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the namespace of the statefulset",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the revision of the statefulset",
+ "name": "revision",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.StatefulSet"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.StatefulSet"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/namespaces/{namespace}/{resources}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespace Resources"
+ ],
+ "summary": "Namespace level resource query",
+ "operationId": "handleListNamespaceResources",
+ "deprecated": true,
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace level resource type, e.g. pods,jobs,configmaps,services.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "key=%s,key~%s",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/quotas": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Cluster Resources"
+ ],
+ "summary": "get whole cluster's resource usage",
+ "operationId": "handleGetClusterQuotas",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ResourceQuota"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ResourceQuota"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/registry/blob": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Docker Registry"
+ ],
+ "summary": "Retrieve the blob from the registry identified",
+ "operationId": "handleGetRegistryEntry",
+ "parameters": [
+ {
+ "type": "string",
+ "format": "image=%s",
+ "description": "query image, condition for filtering.",
+ "name": "image",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "namespace=%s",
+ "description": "namespace which secret in.",
+ "name": "namespace",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "secret=%s",
+ "description": "secret name",
+ "name": "secret",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/registries.ImageDetails"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/registries.ImageDetails"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/registry/verify": {
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Verification"
+ ],
+ "summary": "verify if a user has access to the docker registry",
+ "operationId": "handleVerifyRegistryCredential",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/api.RegistryCredential"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/users/{user}/kubeconfig": {
+ "get": {
+ "produces": [
+ "text/plain",
+ "application/json"
+ ],
+ "tags": [
+ "User Resources"
+ ],
+ "summary": "get users' kubeconfig",
+ "operationId": "GetKubeconfig",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "type": "string"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/users/{user}/kubectl": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "User Resources"
+ ],
+ "summary": "get user's kubectl pod",
+ "operationId": "GetKubectlPod",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "username",
+ "name": "user",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PodInfo"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PodInfo"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha2/{resources}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Cluster Resources"
+ ],
+ "summary": "Cluster level resources",
+ "operationId": "handleListClusterResources",
+ "deprecated": true,
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster level resource type, e.g. nodes,workspaces,storageclasses,clusterrole.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "format": "key=value,key~value",
+ "description": "query conditions, connect multiple conditions with commas, equal symbol for exact query, wave symbol for fuzzy query e.g. name~a",
+ "name": "conditions",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "limit=%d,page=%d",
+ "default": "limit=10,page=1",
+ "description": "paging query, e.g. limit=100,page=1",
+ "name": "paging",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "reverse",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "orderBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/componenthealth": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "Get the health status of system components.",
+ "operationId": "handleGetSystemHealthStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.HealthStatus"
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.HealthStatus"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/components": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "List the system components.",
+ "operationId": "handleGetComponents",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/components/{component}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Component Status"
+ ],
+ "summary": "Describe the specified system component.",
+ "operationId": "handleGetComponentStatus",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "component name",
+ "name": "component",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/namespaces/{namespace}/{resources}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespaced Resource"
+ ],
+ "summary": "Namespace level resource query",
+ "operationId": "handleListResources",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace level resource type, e.g. pods,jobs,configmaps,services.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name used to do filtering",
+ "name": "name",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "page=%d",
+ "default": "page=1",
+ "description": "page",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "limit",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "ascending=false",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "ascending",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "sortBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/namespaces/{namespace}/{resources}/{name}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Namespaced Resource"
+ ],
+ "summary": "Namespace level get resource query",
+ "operationId": "handleGetResources",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "the name of the project",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "namespace level resource type, e.g. pods,jobs,configmaps,services.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of resource",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/{resources}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Clustered Resource"
+ ],
+ "summary": "Cluster level resources",
+ "operationId": "handleListResources",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster level resource type, e.g. pods,jobs,configmaps,services.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name used to do filtering",
+ "name": "name",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "format": "page=%d",
+ "default": "page=1",
+ "description": "page",
+ "name": "page",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "limit",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "ascending=false",
+ "description": "sort parameters, e.g. reverse=true",
+ "name": "ascending",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "sort parameters, e.g. orderBy=createTime",
+ "name": "sortBy",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/resources.kubesphere.io/v1alpha3/{resources}/{name}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Clustered Resource"
+ ],
+ "summary": "Cluster level resource",
+ "operationId": "handleGetResources",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "cluster level resource type, e.g. pods,jobs,configmaps,services.",
+ "name": "resources",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the name of the clustered resources",
+ "name": "name",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok"
+ },
+ "default": {
+ "description": "ok"
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/graph": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get graph from all namespaces",
+ "operationId": "getNamespacesGraph",
+ "parameters": [
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "workload",
+ "description": "type of the generated service graph. Available graph types: [app, service, versionedApp, workload].",
+ "name": "graphType",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "none",
+ "description": "app box grouping characteristic. Available groupings: [app, none, version].",
+ "name": "groupBy",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which time point in UNIX timestamp, default now",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "false",
+ "description": "flag for injecting the requested service node between source and destination nodes.",
+ "name": "injectServiceNodes",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.graphResponse"
+ }
+ },
+ "400": {
+ "description": "bad request",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.BadRequestError"
+ }
+ },
+ "404": {
+ "description": "not found",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.NotFoundError"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.graphResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/apps/{app}/health": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get app health",
+ "operationId": "getAppHealth",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of a namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "app name",
+ "name": "app",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "the rate interval used for fetching error rate",
+ "name": "rateInterval",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the time to use for query",
+ "name": "queryTime",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.appHealthResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.appHealthResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/apps/{app}/metrics": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get app metrics from a specific namespace",
+ "operationId": "getAppMetrics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of the app",
+ "name": "app",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "types of metrics to fetch, fetch all metrics when empty, e.g. request_count, request_duration, request_error_count",
+ "name": "filters[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which UNIX time to extract metrics",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1800",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15",
+ "description": "step between graph data points, in seconds.",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1m",
+ "description": "metrics rate intervals, e.g. 20s",
+ "name": "rateInterval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "outbound",
+ "description": "traffic direction: 'inbound' or 'outbound'",
+ "name": "direction",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of quantiles to fetch, fetch no quantiles when empty. eg. 0.5, 0.9, 0.99",
+ "name": "quantiles[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of labels to use for grouping metrics(via Prometheus 'by' clause), e.g. source_workload, destination_service_name",
+ "name": "byLabels[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "all protocols",
+ "description": "request protocol for the telemetry, e.g. http/tcp/grpc",
+ "name": "requestProtocol",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "source",
+ "description": "istio telemetry reporter, 'source' or 'destination'",
+ "name": "reporter",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/graph": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get service graph for a specific namespace",
+ "operationId": "getNamespaceGraph",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of a namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "workload",
+ "description": "type of the generated service graph. Available graph types: [app, service, versionedApp, workload].",
+ "name": "graphType",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "none",
+ "description": "app box grouping characteristic. Available groupings: [app, none, version].",
+ "name": "groupBy",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which time point in UNIX timestamp, default now",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "false",
+ "description": "flag for injecting the requested service node between source and destination nodes.",
+ "name": "injectServiceNodes",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.graphResponse"
+ }
+ },
+ "400": {
+ "description": "bad request",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.BadRequestError"
+ }
+ },
+ "404": {
+ "description": "not found",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.NotFoundError"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.graphResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/health": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get app/service/workload health of a namespace",
+ "operationId": "getNamespaceHealth",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of a namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "the rate interval used for fetching error rate",
+ "name": "rateInterval",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the time to use for query",
+ "name": "queryTime",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.namespaceAppHealthResponse"
+ }
+ },
+ "400": {
+ "description": "bad request",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.BadRequestError"
+ }
+ },
+ "404": {
+ "description": "not found",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.NotFoundError"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.namespaceAppHealthResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/metrics": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get metrics from a specific namespace",
+ "operationId": "getNamespaceMetrics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "types of metrics to fetch, fetch all metrics when empty, e.g. request_count, request_duration, request_error_count",
+ "name": "filters[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which UNIX time to extract metrics",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1800",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15",
+ "description": "step between graph data points, in seconds.",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1m",
+ "description": "metrics rate intervals, e.g. 20s",
+ "name": "rateInterval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "outbound",
+ "description": "traffic direction: 'inbound' or 'outbound'",
+ "name": "direction",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of quantiles to fetch, fetch no quantiles when empty. eg. 0.5, 0.9, 0.99",
+ "name": "quantiles[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of labels to use for grouping metrics(via Prometheus 'by' clause), e.g. source_workload, destination_service_name",
+ "name": "byLabels[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "all protocols",
+ "description": "request protocol for the telemetry, e.g. http/tcp/grpc",
+ "name": "requestProtocol",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "source",
+ "description": "istio telemetry reporter, 'source' or 'destination'",
+ "name": "reporter",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/services/{service}/health": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get service health",
+ "operationId": "getServiceHealth",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of a namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "service name",
+ "name": "service",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "the rate interval used for fetching error rate",
+ "name": "rateInterval",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the time to use for query",
+ "name": "queryTime",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.serviceHealthResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.serviceHealthResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/services/{service}/metrics": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get service metrics from a specific namespace",
+ "operationId": "getServiceMetrics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of the service",
+ "name": "service",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "types of metrics to fetch, fetch all metrics when empty, e.g. request_count, request_duration, request_error_count",
+ "name": "filters[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which UNIX time to extract metrics",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1800",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15",
+ "description": "step between graph data points, in seconds.",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1m",
+ "description": "metrics rate intervals, e.g. 20s",
+ "name": "rateInterval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "outbound",
+ "description": "traffic direction: 'inbound' or 'outbound'",
+ "name": "direction",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of quantiles to fetch, fetch no quantiles when empty. eg. 0.5, 0.9, 0.99",
+ "name": "quantiles[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of labels to use for grouping metrics(via Prometheus 'by' clause), e.g. source_workload, destination_service_name",
+ "name": "byLabels[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "all protocols",
+ "description": "request protocol for the telemetry, e.g. http/tcp/grpc",
+ "name": "requestProtocol",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "source",
+ "description": "istio telemetry reporter, 'source' or 'destination'",
+ "name": "reporter",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/services/{service}/traces": {
+ "get": {
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get tracing of a service, should have servicemesh enabled first",
+ "operationId": "getServiceTracing",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace of service",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of service queried",
+ "name": "service",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "start of time range want to query, in unix timestamp",
+ "name": "start",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "end of time range want to query, in unix timestamp",
+ "name": "end",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "10",
+ "description": "maximum tracing entries returned at one query, default 10",
+ "name": "limit",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "how far back in time to query, e.g. 30m/1h/2d",
+ "name": "loopback",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "maximum duration of a request",
+ "name": "maxDuration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "minimum duration of a request",
+ "name": "minDuration",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/workloads/{workload}/health": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get workload health",
+ "operationId": "getWorkloadHealth",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of a namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workload name",
+ "name": "workload",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "10m",
+ "description": "the rate interval used for fetching error rate",
+ "name": "rateInterval",
+ "in": "query",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "the time to use for query",
+ "name": "queryTime",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.workloadHealthResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.workloadHealthResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/servicemesh.kubesphere.io/v1alpha2/namespaces/{namespace}/workloads/{workload}/metrics": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "ServiceMesh"
+ ],
+ "summary": "Get workload metrics from a specific namespace",
+ "operationId": "getWorkloadMetrics",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "name of the namespace",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of the workload",
+ "name": "workload",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "types of metrics to fetch, fetch all metrics when empty, e.g. request_count, request_duration, request_error_count",
+ "name": "filters[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "from which UNIX time to extract metrics",
+ "name": "queryTime",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1800",
+ "description": "duration of the query period, in seconds",
+ "name": "duration",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15",
+ "description": "step between graph data points, in seconds.",
+ "name": "step",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "1m",
+ "description": "metrics rate intervals, e.g. 20s",
+ "name": "rateInterval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "outbound",
+ "description": "traffic direction: 'inbound' or 'outbound'",
+ "name": "direction",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of quantiles to fetch, fetch no quantiles when empty. eg. 0.5, 0.9, 0.99",
+ "name": "quantiles[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "[]",
+ "description": "list of labels to use for grouping metrics(via Prometheus 'by' clause), e.g. source_workload, destination_service_name",
+ "name": "byLabels[]",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "all protocols",
+ "description": "request protocol for the telemetry, e.g. http/tcp/grpc",
+ "name": "requestProtocol",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "source",
+ "description": "istio telemetry reporter, 'source' or 'destination'",
+ "name": "reporter",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.metricsResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/auditing/events": {
+ "get": {
+ "consumes": [
+ "application/json",
+ "application/xml"
+ ],
+ "produces": [
+ "application/json",
+ "text/plain"
+ ],
+ "tags": [
+ "Auditing Query"
+ ],
+ "summary": "Query auditing events against the cluster",
+ "operationId": "Auditing",
+ "parameters": [
+ {
+ "type": "string",
+ "default": "query",
+ "description": "Operation type. This can be one of three types: `query` (for querying events), `statistics` (for retrieving statistical data), `histogram` (for displaying events count by time interval). Defaults to query.",
+ "name": "operation",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of workspaces. This field restricts the query to specified workspaces. For example, the following filter matches the workspace my-ws and demo-ws: `my-ws,demo-ws`.",
+ "name": "workspace_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **workspace_filter**, this field performs fuzzy matching on workspaces. For example, the following value limits the query to workspaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "workspace_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of namespaces. This field restricts the query to specified `ObjectRef.Namespace`.",
+ "name": "objectref_namespace_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **objectref_namespace_filter**, this field performs fuzzy matching on `ObjectRef.Namespace`.",
+ "name": "objectref_namespace_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of names. This field restricts the query to specified `ObjectRef.Name`.",
+ "name": "objectref_name_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **objectref_name_filter**, this field performs fuzzy matching on `ObjectRef.Name`.",
+ "name": "objectref_name_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of levels. The known values are Metadata, Request, RequestResponse.",
+ "name": "level_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of verbs. This field restricts the query to specified `Verb`.",
+ "name": "verb_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of users. This field restricts the query to specified users. For example, the following filter matches the user user1 and user2: `user1,user2`.",
+ "name": "user_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **user_filter**, this field performs fuzzy matching on 'User.username'. For example, the following value limits the query to user whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "user_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. This field performs fuzzy matching on 'User.Groups'. For example, the following value limits the query to group which contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "group_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. This field performs fuzzy matching on 'SourceIPs'. For example, the following value limits the query to SourceIPs which contains 127.0 *OR* 192.168.: `127.0,192.168.`.",
+ "name": "source_ip_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of resources. This field restricts the query to specified `ObjectRef.Resource`.",
+ "name": "objectref_resource_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of subresources. This field restricts the query to specified `ObjectRef.Subresource`.",
+ "name": "objectref_subresource_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of response status codes. This field restricts the query to specified `ResponseStatus.code`.",
+ "name": "response_code_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of response statuses. This field restricts the query to specified `ResponseStatus.status`.",
+ "name": "response_status_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query (limits `RequestReceivedTimestamp`). The format is a string representing seconds since the epoch, eg. 1136214245.",
+ "name": "start_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query (limits `RequestReceivedTimestamp`). The format is a string representing seconds since the epoch, eg. 1136214245.",
+ "name": "end_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15m",
+ "description": "Time interval. It requires **operation** is set to `histogram`. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).",
+ "name": "interval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc. This field sorts events by `RequestReceivedTimestamp`.",
+ "name": "sort",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 0,
+ "description": "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to `query`. Defaults to 0 (i.e. from the beginning of the result set).",
+ "name": "from",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "Size of result set to return. It requires **operation** is set to `query`. Defaults to 10 (i.e. 10 event records).",
+ "name": "size",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha1.APIResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha1.APIResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/clusters": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List clusters available to users",
+ "operationId": "ListClusters",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/events": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Events Query"
+ ],
+ "summary": "Query events against the cluster",
+ "operationId": "Events",
+ "parameters": [
+ {
+ "type": "string",
+ "default": "query",
+ "description": "Operation type. This can be one of three types: `query` (for querying events), `statistics` (for retrieving statistical data), `histogram` (for displaying events count by time interval). Defaults to query.",
+ "name": "operation",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of workspaces. This field restricts the query to specified workspaces. For example, the following filter matches the workspace my-ws and demo-ws: `my-ws,demo-ws`.",
+ "name": "workspace_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **workspace_filter**, this field performs fuzzy matching on workspaces. For example, the following value limits the query to workspaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "workspace_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of namespaces. This field restricts the query to specified `involvedObject.namespace`.",
+ "name": "involved_object_namespace_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **involved_object_namespace_filter**, this field performs fuzzy matching on `involvedObject.namespace`",
+ "name": "involved_object_namespace_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of names. This field restricts the query to specified `involvedObject.name`.",
+ "name": "involved_object_name_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **involved_object_name_filter**, this field performs fuzzy matching on `involvedObject.name`.",
+ "name": "involved_object_name_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of kinds. This field restricts the query to specified `involvedObject.kind`.",
+ "name": "involved_object_kind_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of reasons. This field restricts the query to specified `reason`.",
+ "name": "reason_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **reason_filter**, this field performs fuzzy matching on `reason`.",
+ "name": "reason_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. This field performs fuzzy matching on `message`.",
+ "name": "message_search",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Type of event matching on `type`. This can be one of two types: `Warning`, `Normal`",
+ "name": "type_filter",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query (limits `lastTimestamp`). The format is a string representing seconds since the epoch, eg. 1136214245.",
+ "name": "start_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query (limits `lastTimestamp`). The format is a string representing seconds since the epoch, eg. 1136214245.",
+ "name": "end_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15m",
+ "description": "Time interval. It requires **operation** is set to `histogram`. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).",
+ "name": "interval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc. This field sorts events by `lastTimestamp`.",
+ "name": "sort",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 0,
+ "description": "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to `query`. Defaults to 0 (i.e. from the beginning of the result set).",
+ "name": "from",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "Size of result set to return. It requires **operation** is set to `query`. Defaults to 10 (i.e. 10 event records).",
+ "name": "size",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha1.APIResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha1.APIResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/federatednamespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the federated namespaces for the current user",
+ "operationId": "ListFederatedNamespaces",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/logs": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Log Query"
+ ],
+ "summary": "Query logs against the cluster.",
+ "operationId": "QueryLogs",
+ "parameters": [
+ {
+ "type": "string",
+ "default": "query",
+ "description": "Operation type. This can be one of four types: query (for querying logs), statistics (for retrieving statistical data), histogram (for displaying log count by time interval) and export (for exporting logs). Defaults to query.",
+ "name": "operation",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of namespaces. This field restricts the query to specified namespaces. For example, the following filter matches the namespace my-ns and demo-ns: `my-ns,demo-ns`",
+ "name": "namespaces",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **namespaces**, this field performs fuzzy matching on namespaces. For example, the following value limits the query to namespaces whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "namespace_query",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of workloads. This field restricts the query to specified workloads. For example, the following filter matches the workload my-wl and demo-wl: `my-wl,demo-wl`",
+ "name": "workloads",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **workloads**, this field performs fuzzy matching on workloads. For example, the following value limits the query to workloads whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "workload_query",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of pods. This field restricts the query to specified pods. For example, the following filter matches the pod my-po and demo-po: `my-po,demo-po`",
+ "name": "pods",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **pods**, this field performs fuzzy matching on pods. For example, the following value limits the query to pods whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "pod_query",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of containers. This field restricts the query to specified containers. For example, the following filter matches the container my-cont and demo-cont: `my-cont,demo-cont`",
+ "name": "containers",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. Differing from **containers**, this field performs fuzzy matching on containers. For example, the following value limits the query to containers whose name contains the word my(My,MY,...) *OR* demo(Demo,DemO,...): `my,demo`.",
+ "name": "container_query",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "A comma-separated list of keywords. The query returns logs which contain at least one keyword. Case-insensitive matching. For example, if the field is set to `err,INFO`, the query returns any log containing err(ERR,Err,...) *OR* INFO(info,InFo,...).",
+ "name": "log_query",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "15m",
+ "description": "Time interval. It requires **operation** is set to histogram. The format is [0-9]+[smhdwMqy]. Defaults to 15m (i.e. 15 min).",
+ "name": "interval",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Start time of query. Default to 0. The format is a string representing seconds since the epoch, eg. 1559664000.",
+ "name": "start_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "End time of query. Default to now. The format is a string representing seconds since the epoch, eg. 1559664000.",
+ "name": "end_time",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "default": "desc",
+ "description": "Sort order. One of asc, desc. This field sorts logs by timestamp.",
+ "name": "sort",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 0,
+ "description": "The offset from the result set. This field returns query results from the specified offset. It requires **operation** is set to query. Defaults to 0 (i.e. from the beginning of the result set).",
+ "name": "from",
+ "in": "query"
+ },
+ {
+ "type": "integer",
+ "default": 10,
+ "description": "Size of result to return. It requires **operation** is set to query. Defaults to 10 (i.e. 10 log records).",
+ "name": "size",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.APIResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.APIResponse"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/namespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the namespaces for the current user",
+ "operationId": "ListNamespaces",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List all workspaces that belong to the current user",
+ "operationId": "ListWorkspaces",
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/models.PageableResponse"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Create workspace.",
+ "operationId": "CreateWorkspace",
+ "parameters": [
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Describe workspace.",
+ "operationId": "DescribeWorkspace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Update workspace.",
+ "operationId": "UpdateWorkspace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Delete workspace.",
+ "operationId": "DeleteWorkspace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Update workspace.",
+ "operationId": "PatchWorkspace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1alpha2.WorkspaceTemplate"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/clusters": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List clusters authorized to the specified workspace.",
+ "operationId": "ListWorkspaceClusters",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/devops": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the devops projects of the specified workspace for the current user",
+ "operationId": "ListDevOpsProjects",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/federatednamespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the federated namespaces of the specified workspace for the current user",
+ "operationId": "ListFederatedNamespaces",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/namespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the namespaces of the specified workspace for the current user",
+ "operationId": "ListNamespaces",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/api.ListResult"
+ }
+ }
+ }
+ },
+ "post": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Create a namespace in the specified workspace",
+ "operationId": "CreateNamespace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/namespaces/{namespace}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Retrieve namespace details.",
+ "operationId": "DescribeNamespace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ },
+ "put": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "operationId": "UpdateNamespace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ },
+ "delete": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "Delete namespace.",
+ "operationId": "DeleteNamespace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/errors.Error"
+ }
+ }
+ }
+ },
+ "patch": {
+ "consumes": [
+ "application/json",
+ "application/merge-patch+json",
+ "application/json-patch+json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "operationId": "PatchNamespace",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "project name",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/workspacemembers/{workspacemember}/devops": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the devops projects of specified workspace for the workspace member",
+ "operationId": "ListDevOpsProjects",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspacemember username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/tenant.kubesphere.io/v1alpha2/workspaces/{workspace}/workspacemembers/{workspacemember}/namespaces": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Tenant Resources"
+ ],
+ "summary": "List the namespaces of the specified workspace for the workspace member",
+ "operationId": "ListNamespaces",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "workspace name",
+ "name": "workspace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "workspacemember username",
+ "name": "workspacemember",
+ "in": "path",
+ "required": true
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ },
+ "default": {
+ "description": "ok",
+ "schema": {
+ "$ref": "#/definitions/v1.Namespace"
+ }
+ }
+ }
+ }
+ },
+ "/kapis/terminal.kubesphere.io/v1alpha2/namespaces/{namespace}/pods/{pod}": {
+ "get": {
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Terminal"
+ ],
+ "summary": "create terminal session",
+ "operationId": "handleTerminalSession",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "namespace of which the pod located in",
+ "name": "namespace",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "name of the pod",
+ "name": "pod",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK"
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "._links": {
+ "properties": {
+ "actions": {
+ "$ref": "#/definitions/.actions"
+ },
+ "branches": {
+ "$ref": "#/definitions/.branches"
+ },
+ "queue": {
+ "$ref": "#/definitions/.queue"
+ },
+ "runs": {
+ "$ref": "#/definitions/.runs"
+ },
+ "scm": {
+ "$ref": "#/definitions/.scm"
+ },
+ "self": {
+ "$ref": "#/definitions/.self"
+ },
+ "trends": {
+ "$ref": "#/definitions/.trends"
+ }
+ }
+ },
+ ".actions": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".branch": {
+ "properties": {
+ "isPrimary": {
+ "description": "primary or not",
+ "type": "boolean"
+ },
+ "issues": {
+ "description": "issues",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.branch.issues"
+ }
+ },
+ "url": {
+ "description": "url",
+ "type": "string"
+ }
+ }
+ },
+ ".branch.issues": {},
+ ".branches": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".data": {
+ "properties": {
+ "errors": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.data.errors"
+ }
+ },
+ "jenkinsfile": {
+ "description": "jenkinsfile",
+ "type": "string"
+ },
+ "result": {
+ "description": "result e.g. success",
+ "type": "string"
+ }
+ }
+ },
+ ".data.errors": {
+ "properties": {
+ "error": {
+ "description": "error message",
+ "type": "string"
+ },
+ "location": {
+ "description": "err location",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ ".defaultParameterValue": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "value": {
+ "description": "value",
+ "type": "string"
+ }
+ }
+ },
+ ".latestRun": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.latestRun.actions"
+ }
+ },
+ "artifactsZipFile": {
+ "description": "the artifacts zip file",
+ "type": "string"
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/.latestRun.causeOfBlockage"
+ },
+ "causes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.latestRun.causes"
+ }
+ },
+ "changeSet": {
+ "description": "changeset information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.latestRun.changeSet"
+ }
+ },
+ "description": {
+ "description": "description",
+ "$ref": "#/definitions/.latestRun.description"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "enQueueTime": {
+ "description": "the time of enter the queue",
+ "type": "string"
+ },
+ "endTime": {
+ "description": "the time of end",
+ "type": "string"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "$ref": "#/definitions/.latestRun.name"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "pipeline": {
+ "description": "pipeline",
+ "type": "string"
+ },
+ "replayable": {
+ "description": "Replayable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "runSummary": {
+ "description": "pipeline run summary",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ ".latestRun.actions": {},
+ ".latestRun.causeOfBlockage": {},
+ ".latestRun.causes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "shortDescription": {
+ "description": "short description",
+ "type": "string"
+ },
+ "userId": {
+ "description": "user id",
+ "type": "string"
+ },
+ "userName": {
+ "description": "user name",
+ "type": "string"
+ }
+ }
+ },
+ ".latestRun.changeSet": {},
+ ".latestRun.description": {},
+ ".latestRun.name": {},
+ ".permissions": {
+ "properties": {
+ "configure": {
+ "description": "configure action",
+ "type": "boolean"
+ },
+ "create": {
+ "description": "create action",
+ "type": "boolean"
+ },
+ "read": {
+ "description": "read action",
+ "type": "boolean"
+ },
+ "start": {
+ "description": "start action",
+ "type": "boolean"
+ },
+ "stop": {
+ "description": "stop action",
+ "type": "boolean"
+ }
+ }
+ },
+ ".queue": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".ranges": {
+ "properties": {
+ "ranges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.ranges.ranges"
+ }
+ }
+ }
+ },
+ ".ranges.ranges": {
+ "properties": {
+ "end": {
+ "description": "End build number",
+ "type": "integer",
+ "format": "int32"
+ },
+ "start": {
+ "description": "Start build number",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ ".repositories": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/.repositories.items"
+ }
+ },
+ "lastPage": {
+ "description": "last page",
+ "$ref": "#/definitions/.repositories.lastPage"
+ },
+ "nextPage": {
+ "description": "next page",
+ "$ref": "#/definitions/.repositories.nextPage"
+ },
+ "pageSize": {
+ "description": "page size",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ ".repositories.items": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "defaultBranch": {
+ "description": "default branch",
+ "type": "string"
+ },
+ "description": {
+ "description": "description",
+ "type": "string"
+ },
+ "fullName": {
+ "description": "full name",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "permissions": {
+ "$ref": "#/definitions/.permissions"
+ },
+ "private": {
+ "description": "private or not",
+ "type": "boolean"
+ }
+ }
+ },
+ ".repositories.lastPage": {},
+ ".repositories.nextPage": {},
+ ".runs": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".scm": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".scmSource": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "apiUrl": {
+ "description": "api url",
+ "$ref": "#/definitions/.scmSource.apiUrl"
+ },
+ "id": {
+ "description": "The id of the source configuration management (SCM).",
+ "type": "string"
+ }
+ }
+ },
+ ".scmSource.apiUrl": {},
+ ".self": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ ".trends": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ "api.ListResult": {
+ "required": [
+ "items",
+ "totalItems"
+ ],
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.ListResult.items"
+ }
+ },
+ "totalItems": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "api.ListResult.items": {},
+ "api.RegistryCredential": {
+ "required": [
+ "username",
+ "password",
+ "serverhost"
+ ],
+ "properties": {
+ "password": {
+ "description": "password",
+ "type": "string"
+ },
+ "serverhost": {
+ "description": "registry server host",
+ "type": "string"
+ },
+ "username": {
+ "description": "username",
+ "type": "string"
+ }
+ }
+ },
+ "api.ResourceQuota": {
+ "required": [
+ "namespace",
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "description": "resource quota status",
+ "$ref": "#/definitions/v1.ResourceQuotaStatus"
+ },
+ "namespace": {
+ "description": "namespace",
+ "type": "string"
+ }
+ }
+ },
+ "api.Workloads": {
+ "required": [
+ "namespace",
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "description": "the number of unhealthy workloads",
+ "type": "object",
+ "additionalProperties": {
+ "type": "integer"
+ }
+ },
+ "items": {
+ "description": "unhealthy workloads",
+ "type": "object"
+ },
+ "namespace": {
+ "description": "the name of the namespace",
+ "type": "string"
+ }
+ }
+ },
+ "big.Int": {
+ "required": [
+ "neg",
+ "abs"
+ ],
+ "properties": {
+ "abs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/big.Word"
+ }
+ },
+ "neg": {
+ "type": "boolean"
+ }
+ }
+ },
+ "big.Word": {},
+ "cytoscape.EdgeData": {
+ "required": [
+ "id",
+ "source",
+ "target"
+ ],
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "isMTLS": {
+ "type": "string"
+ },
+ "isUnused": {
+ "type": "boolean"
+ },
+ "responseTime": {
+ "type": "string"
+ },
+ "source": {
+ "type": "string"
+ },
+ "target": {
+ "type": "string"
+ },
+ "traffic": {
+ "$ref": "#/definitions/cytoscape.ProtocolTraffic"
+ }
+ }
+ },
+ "cytoscape.EdgeWrapper": {
+ "required": [
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/cytoscape.EdgeData"
+ }
+ }
+ },
+ "cytoscape.Elements": {
+ "required": [
+ "nodes",
+ "edges"
+ ],
+ "properties": {
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/cytoscape.EdgeWrapper"
+ }
+ },
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/cytoscape.NodeWrapper"
+ }
+ }
+ }
+ },
+ "cytoscape.NodeData": {
+ "required": [
+ "id",
+ "nodeType",
+ "namespace"
+ ],
+ "properties": {
+ "app": {
+ "type": "string"
+ },
+ "destServices": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "hasCB": {
+ "type": "boolean"
+ },
+ "hasMissingSC": {
+ "type": "boolean"
+ },
+ "hasVS": {
+ "type": "boolean"
+ },
+ "id": {
+ "type": "string"
+ },
+ "isDead": {
+ "type": "boolean"
+ },
+ "isGroup": {
+ "type": "string"
+ },
+ "isInaccessible": {
+ "type": "boolean"
+ },
+ "isMisconfigured": {
+ "type": "string"
+ },
+ "isOutside": {
+ "type": "boolean"
+ },
+ "isRoot": {
+ "type": "boolean"
+ },
+ "isServiceEntry": {
+ "type": "string"
+ },
+ "isUnused": {
+ "type": "boolean"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "nodeType": {
+ "type": "string"
+ },
+ "parent": {
+ "type": "string"
+ },
+ "service": {
+ "type": "string"
+ },
+ "traffic": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/cytoscape.ProtocolTraffic"
+ }
+ },
+ "version": {
+ "type": "string"
+ },
+ "workload": {
+ "type": "string"
+ }
+ }
+ },
+ "cytoscape.NodeWrapper": {
+ "required": [
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/cytoscape.NodeData"
+ }
+ }
+ },
+ "cytoscape.ProtocolTraffic": {
+ "required": [
+ "protocol",
+ "rates"
+ ],
+ "properties": {
+ "protocol": {
+ "type": "string"
+ },
+ "rates": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "devops.Artifacts": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "downloadable": {
+ "description": "downloadable or not",
+ "type": "boolean"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "path": {
+ "description": "path",
+ "type": "string"
+ },
+ "size": {
+ "description": "size",
+ "type": "integer",
+ "format": "int32"
+ },
+ "url": {
+ "description": "The url for downloading artifacts",
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipeline": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipeline.actions"
+ }
+ },
+ "branch": {
+ "$ref": "#/definitions/.branch"
+ },
+ "disabled": {
+ "description": "disable or not, if disabled, can not do any action",
+ "type": "boolean"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "fullDisplayName": {
+ "description": "full display name",
+ "type": "string"
+ },
+ "fullName": {
+ "description": "full name",
+ "type": "string"
+ },
+ "latestRun": {
+ "$ref": "#/definitions/.latestRun"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "parameters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipeline.parameters"
+ }
+ },
+ "permissions": {
+ "$ref": "#/definitions/.permissions"
+ },
+ "weatherScore": {
+ "description": "the score to describe the result of the pipeline",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "devops.BranchPipeline.actions": {},
+ "devops.BranchPipeline.parameters": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "choices": {
+ "description": "choices",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipeline.parameters.choices"
+ }
+ },
+ "defaultParameterValue": {
+ "$ref": "#/definitions/.defaultParameterValue"
+ },
+ "description": {
+ "description": "description",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipeline.parameters.choices": {},
+ "devops.BranchPipelineRunNodes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.actions"
+ }
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.causeOfBlockage"
+ },
+ "displayDescription": {
+ "description": "display description",
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.displayDescription"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.edges"
+ }
+ },
+ "firstParent": {
+ "description": "first parent resource",
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.firstParent"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "input": {
+ "description": "the action that requires user input",
+ "$ref": "#/definitions/devops.Input"
+ },
+ "restartable": {
+ "description": "restartable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "steps": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.steps"
+ }
+ },
+ "type": {
+ "description": "source type, e.g. \"WorkflowRun\"",
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipelineRunNodes.actions": {},
+ "devops.BranchPipelineRunNodes.causeOfBlockage": {},
+ "devops.BranchPipelineRunNodes.displayDescription": {},
+ "devops.BranchPipelineRunNodes.edges": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "type": {
+ "description": "source type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipelineRunNodes.firstParent": {},
+ "devops.BranchPipelineRunNodes.steps": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.steps.actions"
+ }
+ },
+ "displayDescription": {
+ "description": "display description",
+ "$ref": "#/definitions/devops.BranchPipelineRunNodes.steps.displayDescription"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "input": {
+ "description": "the action that requires user input",
+ "$ref": "#/definitions/devops.Input"
+ },
+ "result": {
+ "description": "result",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "source type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipelineRunNodes.steps.actions": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "_links": {
+ "$ref": "#/definitions/._links"
+ },
+ "urlName": {
+ "type": "string"
+ }
+ }
+ },
+ "devops.BranchPipelineRunNodes.steps.displayDescription": {},
+ "devops.CheckCronRes": {
+ "properties": {
+ "lastTime": {
+ "description": "last run time.",
+ "type": "string"
+ },
+ "message": {
+ "description": "message",
+ "type": "string"
+ },
+ "nextTime": {
+ "description": "next run time.",
+ "type": "string"
+ },
+ "result": {
+ "description": "result e.g. ok, error",
+ "type": "string"
+ }
+ }
+ },
+ "devops.CheckPlayload": {
+ "properties": {
+ "abort": {
+ "description": "abort or not",
+ "type": "boolean"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "parameters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.CheckPlayloadParameters"
+ }
+ }
+ }
+ },
+ "devops.CheckPlayloadParameters": {
+ "properties": {
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "value": {
+ "description": "value",
+ "$ref": "#/definitions/devops.CheckPlayloadParameters.value"
+ }
+ }
+ },
+ "devops.CheckPlayloadParameters.value": {},
+ "devops.CheckScript": {
+ "properties": {
+ "column": {
+ "description": "column e.g. 0",
+ "type": "integer",
+ "format": "int32"
+ },
+ "line": {
+ "description": "line e.g. 0",
+ "type": "integer",
+ "format": "int32"
+ },
+ "message": {
+ "description": "message e.g. unexpected char: '#'",
+ "type": "string"
+ },
+ "status": {
+ "description": "status e.g. fail",
+ "type": "string"
+ }
+ }
+ },
+ "devops.CreateScmServerReq": {
+ "properties": {
+ "apiUrl": {
+ "description": "url of scm server",
+ "type": "string"
+ },
+ "name": {
+ "description": "name of scm server",
+ "type": "string"
+ }
+ }
+ },
+ "devops.Credential": {
+ "required": [
+ "id",
+ "type"
+ ],
+ "properties": {
+ "description": {
+ "description": "Credential's description",
+ "type": "string"
+ },
+ "display_name": {
+ "description": "Credential's display name",
+ "type": "string"
+ },
+ "domain": {
+ "description": "Credential's domain. In KubeSphere we only use the default domain, default '_'",
+ "type": "string"
+ },
+ "fingerprint": {
+ "description": "usage of the Credential",
+ "$ref": "#/definitions/devops.Credential.fingerprint"
+ },
+ "id": {
+ "description": "Id of Credential, e.g. dockerhub-id",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of Credential, e.g. ssh/kubeconfig",
+ "type": "string"
+ }
+ }
+ },
+ "devops.Credential.fingerprint": {
+ "properties": {
+ "file_name": {
+ "description": "Credential's display name and description",
+ "type": "string"
+ },
+ "hash": {
+ "description": "Credential's hash",
+ "type": "string"
+ },
+ "usage": {
+ "description": "all usage of Credential",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Credential.fingerprint.usage"
+ }
+ }
+ }
+ },
+ "devops.Credential.fingerprint.usage": {
+ "properties": {
+ "name": {
+ "description": "pipeline full name",
+ "type": "string"
+ },
+ "ranges": {
+ "description": "The build number of all pipelines that use this credential",
+ "$ref": "#/definitions/.ranges"
+ }
+ }
+ },
+ "devops.CronData": {
+ "required": [
+ "cron"
+ ],
+ "properties": {
+ "cron": {
+ "description": "Cron script data.",
+ "type": "string"
+ },
+ "pipelineName": {
+ "description": "Pipeline name; not required if the pipeline hasn't been created",
+ "type": "string"
+ }
+ }
+ },
+ "devops.Crumb": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "crumb": {
+ "description": "crumb data",
+ "type": "string"
+ },
+ "crumbRequestField": {
+ "description": "crumb request field",
+ "type": "string"
+ }
+ }
+ },
+ "devops.Input": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/devops.Input._links"
+ },
+ "id": {
+ "description": "the id of check action",
+ "type": "string"
+ },
+ "message": {
+ "description": "the message of check action",
+ "type": "string"
+ },
+ "ok": {
+ "description": "check status. e.g. \"Proceed\"",
+ "type": "string"
+ },
+ "parameters": {
+ "description": "the parameters of check action",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Input.parameters"
+ }
+ },
+ "submitter": {
+ "description": "check submitter",
+ "$ref": "#/definitions/devops.Input.submitter"
+ }
+ }
+ },
+ "devops.Input._links": {
+ "properties": {
+ "self": {
+ "$ref": "#/definitions/devops.Input._links.self"
+ }
+ }
+ },
+ "devops.Input._links.self": {
+ "properties": {
+ "_class": {
+ "type": "string"
+ },
+ "href": {
+ "type": "string"
+ }
+ }
+ },
+ "devops.Input.parameters": {},
+ "devops.Input.submitter": {},
+ "devops.NodeSteps": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps.actions"
+ }
+ },
+ "displayDescription": {
+ "description": "display description",
+ "type": "string"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "input": {
+ "description": "the action that requires user input",
+ "$ref": "#/definitions/devops.Input"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. SKIPPED",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.NodeSteps.actions": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "urlName": {
+ "description": "url name",
+ "type": "string"
+ }
+ }
+ },
+ "devops.NodesDetail": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail.actions"
+ }
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/devops.NodesDetail.causeOfBlockage"
+ },
+ "displayDescription": {
+ "description": "display description",
+ "$ref": "#/definitions/devops.NodesDetail.displayDescription"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "edges": {
+ "description": "edges",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodesDetail.edges"
+ }
+ },
+ "firstParent": {
+ "description": "first parent",
+ "$ref": "#/definitions/devops.NodesDetail.firstParent"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "input": {
+ "description": "the action that requires user input",
+ "$ref": "#/definitions/devops.Input"
+ },
+ "restartable": {
+ "description": "restartable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. FINISHED",
+ "type": "string"
+ },
+ "steps": {
+ "description": "steps",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.NodeSteps"
+ }
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.NodesDetail.actions": {},
+ "devops.NodesDetail.causeOfBlockage": {},
+ "devops.NodesDetail.displayDescription": {},
+ "devops.NodesDetail.edges": {},
+ "devops.NodesDetail.firstParent": {},
+ "devops.OrgRepo": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "repositories": {
+ "$ref": "#/definitions/.repositories"
+ }
+ }
+ },
+ "devops.Pipeline": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource.",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Pipeline.actions"
+ }
+ },
+ "branchNames": {
+ "description": "branch names",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "disabled": {
+ "description": "disable or not, if disabled, can not do any action.",
+ "$ref": "#/definitions/devops.Pipeline.disabled"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time, unit is millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "fullDisplayName": {
+ "description": "full display name",
+ "type": "string"
+ },
+ "fullName": {
+ "description": "full name",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "numberOfFailingBranches": {
+ "description": "number of failing branches",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberOfFailingPullRequests": {
+ "description": "number of failing pull requests",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberOfFolders": {
+ "description": "number of folders",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberOfPipelines": {
+ "description": "number of pipelines",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberOfSuccessfulBranches": {
+ "description": "number of successful branches",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberOfSuccessfulPullRequests": {
+ "description": "number of successful pull requests",
+ "type": "integer",
+ "format": "int32"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "parameters": {
+ "description": "parameters of pipeline, a pipeline can define list of parameters pipeline job expects.",
+ "$ref": "#/definitions/devops.Pipeline.parameters"
+ },
+ "permissions": {
+ "description": "permissions",
+ "$ref": "#/definitions/.permissions"
+ },
+ "pipelineFolderNames": {
+ "description": "pipeline folder names",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Pipeline.pipelineFolderNames"
+ }
+ },
+ "scmSource": {
+ "$ref": "#/definitions/.scmSource"
+ },
+ "totalNumberOfBranches": {
+ "description": "total number of branches",
+ "type": "integer",
+ "format": "int32"
+ },
+ "totalNumberOfPullRequests": {
+ "description": "total number of pull requests",
+ "type": "integer",
+ "format": "int32"
+ },
+ "weatherScore": {
+ "description": "the score to describe the result of the pipeline activity",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "devops.Pipeline.actions": {},
+ "devops.Pipeline.disabled": {},
+ "devops.Pipeline.parameters": {},
+ "devops.Pipeline.pipelineFolderNames": {},
+ "devops.PipelineBranch": {},
+ "devops.PipelineList": {
+ "required": [
+ "items",
+ "total_count"
+ ],
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.Pipeline"
+ }
+ },
+ "total_count": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "devops.PipelineRun": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRun.actions"
+ }
+ },
+ "artifactsZipFile": {
+ "description": "the artifacts zip file",
+ "$ref": "#/definitions/devops.PipelineRun.artifactsZipFile"
+ },
+ "branch": {
+ "description": "branch",
+ "$ref": "#/definitions/devops.PipelineRun.branch"
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/devops.PipelineRun.causeOfBlockage"
+ },
+ "causes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRun.causes"
+ }
+ },
+ "changeSet": {
+ "description": "changeset information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRun.changeSet"
+ }
+ },
+ "commitId": {
+ "description": "commit id",
+ "$ref": "#/definitions/devops.PipelineRun.commitId"
+ },
+ "commitUrl": {
+ "description": "commit url",
+ "$ref": "#/definitions/devops.PipelineRun.commitUrl"
+ },
+ "description": {
+ "description": "description",
+ "$ref": "#/definitions/devops.PipelineRun.description"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "enQueueTime": {
+ "description": "the time of entering the queue",
+ "type": "string"
+ },
+ "endTime": {
+ "description": "the time of end",
+ "type": "string"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "$ref": "#/definitions/devops.PipelineRun.name"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "pipeline": {
+ "description": "the name of pipeline",
+ "type": "string"
+ },
+ "pullRequest": {
+ "description": "pull request",
+ "$ref": "#/definitions/devops.PipelineRun.pullRequest"
+ },
+ "replayable": {
+ "description": "replayable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "runSummary": {
+ "description": "pipeline run summary",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.PipelineRun.actions": {},
+ "devops.PipelineRun.artifactsZipFile": {},
+ "devops.PipelineRun.branch": {},
+ "devops.PipelineRun.causeOfBlockage": {},
+ "devops.PipelineRun.causes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "shortDescription": {
+ "description": "short description",
+ "type": "string"
+ },
+ "userId": {
+ "description": "user id",
+ "type": "string"
+ },
+ "userName": {
+ "description": "user name",
+ "type": "string"
+ }
+ }
+ },
+ "devops.PipelineRun.changeSet": {},
+ "devops.PipelineRun.commitId": {},
+ "devops.PipelineRun.commitUrl": {},
+ "devops.PipelineRun.description": {},
+ "devops.PipelineRun.name": {},
+ "devops.PipelineRun.pullRequest": {},
+ "devops.PipelineRunList": {
+ "required": [
+ "items",
+ "totalItems"
+ ],
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRun"
+ }
+ },
+ "totalItems": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "devops.PipelineRunNodes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRunNodes.actions"
+ }
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/devops.PipelineRunNodes.causeOfBlockage"
+ },
+ "displayDescription": {
+ "description": "display description",
+ "$ref": "#/definitions/devops.PipelineRunNodes.displayDescription"
+ },
+ "displayName": {
+ "description": "display name",
+ "type": "string"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "edges": {
+ "description": "edges",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.PipelineRunNodes.edges"
+ }
+ },
+ "firstParent": {
+ "description": "first parent",
+ "$ref": "#/definitions/devops.PipelineRunNodes.firstParent"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "input": {
+ "description": "the action should user input",
+ "$ref": "#/definitions/devops.Input"
+ },
+ "restartable": {
+ "description": "restartable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. FINISHED",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.PipelineRunNodes.actions": {},
+ "devops.PipelineRunNodes.causeOfBlockage": {},
+ "devops.PipelineRunNodes.displayDescription": {},
+ "devops.PipelineRunNodes.edges": {},
+ "devops.PipelineRunNodes.firstParent": {},
+ "devops.ReplayPipeline": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.ReplayPipeline.actions"
+ }
+ },
+ "artifactsZipFile": {
+ "description": "the artifacts zip file",
+ "$ref": "#/definitions/devops.ReplayPipeline.artifactsZipFile"
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "type": "string"
+ },
+ "causes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.ReplayPipeline.causes"
+ }
+ },
+ "changeSet": {
+ "description": "changeset information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.ReplayPipeline.changeSet"
+ }
+ },
+ "description": {
+ "description": "description",
+ "$ref": "#/definitions/devops.ReplayPipeline.description"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "$ref": "#/definitions/devops.ReplayPipeline.durationInMillis"
+ },
+ "enQueueTime": {
+ "description": "the time of entering the queue",
+ "$ref": "#/definitions/devops.ReplayPipeline.enQueueTime"
+ },
+ "endTime": {
+ "description": "the time of end",
+ "$ref": "#/definitions/devops.ReplayPipeline.endTime"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time, unit is millis",
+ "$ref": "#/definitions/devops.ReplayPipeline.estimatedDurationInMillis"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "$ref": "#/definitions/devops.ReplayPipeline.name"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "pipeline": {
+ "description": "pipeline",
+ "type": "string"
+ },
+ "queueId": {
+ "description": "queue id",
+ "type": "string"
+ },
+ "replayable": {
+ "description": "replayable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "runSummary": {
+ "description": "pipeline run summary",
+ "$ref": "#/definitions/devops.ReplayPipeline.runSummary"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "$ref": "#/definitions/devops.ReplayPipeline.startTime"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ReplayPipeline.actions": {},
+ "devops.ReplayPipeline.artifactsZipFile": {},
+ "devops.ReplayPipeline.causes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "shortDescription": {
+ "description": "short description",
+ "type": "string"
+ },
+ "userId": {
+ "description": "user id",
+ "type": "string"
+ },
+ "userName": {
+ "description": "user name",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ReplayPipeline.changeSet": {},
+ "devops.ReplayPipeline.description": {},
+ "devops.ReplayPipeline.durationInMillis": {},
+ "devops.ReplayPipeline.enQueueTime": {},
+ "devops.ReplayPipeline.endTime": {},
+ "devops.ReplayPipeline.estimatedDurationInMillis": {},
+ "devops.ReplayPipeline.name": {},
+ "devops.ReplayPipeline.runSummary": {},
+ "devops.ReplayPipeline.startTime": {},
+ "devops.ReqJenkinsfile": {
+ "properties": {
+ "jenkinsfile": {
+ "description": "jenkinsfile",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ReqJson": {
+ "properties": {
+ "json": {
+ "description": "json data",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ReqScript": {
+ "properties": {
+ "value": {
+ "description": "Pipeline script data",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ResJenkinsfile": {
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/.data"
+ },
+ "status": {
+ "description": "status e.g. ok",
+ "type": "string"
+ }
+ }
+ },
+ "devops.ResJson": {
+ "properties": {
+ "data": {
+ "$ref": "#/definitions/.data"
+ },
+ "status": {
+ "description": "status e.g. ok",
+ "type": "string"
+ }
+ }
+ },
+ "devops.RunPayload": {
+ "properties": {
+ "parameters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.RunPayload.parameters"
+ }
+ }
+ }
+ },
+ "devops.RunPayload.parameters": {
+ "properties": {
+ "name": {
+ "description": "name",
+ "type": "string"
+ },
+ "value": {
+ "description": "value",
+ "type": "string"
+ }
+ }
+ },
+ "devops.RunPipeline": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.RunPipeline.actions"
+ }
+ },
+ "artifactsZipFile": {
+ "description": "the artifacts zip file",
+ "$ref": "#/definitions/devops.RunPipeline.artifactsZipFile"
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "type": "string"
+ },
+ "causes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.RunPipeline.causes"
+ }
+ },
+ "changeSet": {
+ "description": "changeset information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.RunPipeline.changeSet"
+ }
+ },
+ "description": {
+ "description": "description",
+ "$ref": "#/definitions/devops.RunPipeline.description"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "$ref": "#/definitions/devops.RunPipeline.durationInMillis"
+ },
+ "enQueueTime": {
+ "description": "the time of entering the queue",
+ "$ref": "#/definitions/devops.RunPipeline.enQueueTime"
+ },
+ "endTime": {
+ "description": "the time of end",
+ "$ref": "#/definitions/devops.RunPipeline.endTime"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time in millis",
+ "$ref": "#/definitions/devops.RunPipeline.estimatedDurationInMillis"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "$ref": "#/definitions/devops.RunPipeline.name"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "pipeline": {
+ "description": "pipeline",
+ "type": "string"
+ },
+ "queueId": {
+ "description": "queue id",
+ "type": "string"
+ },
+ "replayable": {
+ "description": "replayable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "runSummary": {
+ "description": "pipeline run summary",
+ "$ref": "#/definitions/devops.RunPipeline.runSummary"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "$ref": "#/definitions/devops.RunPipeline.startTime"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.RunPipeline.actions": {},
+ "devops.RunPipeline.artifactsZipFile": {},
+ "devops.RunPipeline.causes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "shortDescription": {
+ "description": "short description",
+ "type": "string"
+ },
+ "userId": {
+ "description": "user id",
+ "type": "string"
+ },
+ "userName": {
+ "description": "user name",
+ "type": "string"
+ }
+ }
+ },
+ "devops.RunPipeline.changeSet": {},
+ "devops.RunPipeline.description": {},
+ "devops.RunPipeline.durationInMillis": {},
+ "devops.RunPipeline.enQueueTime": {},
+ "devops.RunPipeline.endTime": {},
+ "devops.RunPipeline.estimatedDurationInMillis": {},
+ "devops.RunPipeline.name": {},
+ "devops.RunPipeline.runSummary": {},
+ "devops.RunPipeline.startTime": {},
+ "devops.SCMOrg": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "avatar": {
+ "description": "the url of organization avatar",
+ "type": "string"
+ },
+ "jenkinsOrganizationPipeline": {
+ "description": "whether or not a Jenkins pipeline already exists.",
+ "type": "boolean"
+ },
+ "name": {
+ "description": "organization name",
+ "type": "string"
+ }
+ }
+ },
+ "devops.SCMServer": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "apiUrl": {
+ "description": "url of scm server",
+ "type": "string"
+ },
+ "id": {
+ "description": "server id of scm server",
+ "type": "string"
+ },
+ "name": {
+ "description": "name of scm server",
+ "type": "string"
+ }
+ }
+ },
+ "devops.StopPipeline": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "_links": {
+ "description": "references the reachable path to this resource",
+ "$ref": "#/definitions/._links"
+ },
+ "actions": {
+ "description": "the list of all actions.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.StopPipeline.actions"
+ }
+ },
+ "artifactsZipFile": {
+ "description": "the artifacts zip file",
+ "$ref": "#/definitions/devops.StopPipeline.artifactsZipFile"
+ },
+ "branch": {
+ "$ref": "#/definitions/.branch"
+ },
+ "causeOfBlockage": {
+ "description": "the cause of blockage",
+ "$ref": "#/definitions/devops.StopPipeline.causeOfBlockage"
+ },
+ "causes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.StopPipeline.causes"
+ }
+ },
+ "changeSet": {
+ "description": "changeset information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/devops.StopPipeline.changeSet"
+ }
+ },
+ "commitId": {
+ "description": "commit id",
+ "type": "string"
+ },
+ "commitUrl": {
+ "description": "commit url",
+ "$ref": "#/definitions/devops.StopPipeline.commitUrl"
+ },
+ "description": {
+ "description": "description",
+ "$ref": "#/definitions/devops.StopPipeline.description"
+ },
+ "durationInMillis": {
+ "description": "duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "enQueueTime": {
+ "description": "the time of entering the queue",
+ "type": "string"
+ },
+ "endTime": {
+ "description": "the time of end",
+ "type": "string"
+ },
+ "estimatedDurationInMillis": {
+ "description": "estimated duration time in millis",
+ "type": "integer",
+ "format": "int32"
+ },
+ "id": {
+ "description": "id",
+ "type": "string"
+ },
+ "name": {
+ "description": "name",
+ "$ref": "#/definitions/devops.StopPipeline.name"
+ },
+ "organization": {
+ "description": "the name of organization",
+ "type": "string"
+ },
+ "pipeline": {
+ "description": "pipeline",
+ "type": "string"
+ },
+ "pullRequest": {
+ "description": "pull request",
+ "$ref": "#/definitions/devops.StopPipeline.pullRequest"
+ },
+ "replayable": {
+ "description": "replayable or not",
+ "type": "boolean"
+ },
+ "result": {
+ "description": "the result of pipeline run. e.g. SUCCESS",
+ "type": "string"
+ },
+ "runSummary": {
+ "description": "pipeline run summary",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "the time of start",
+ "type": "string"
+ },
+ "state": {
+ "description": "run state. e.g. RUNNING",
+ "type": "string"
+ },
+ "type": {
+ "description": "type",
+ "type": "string"
+ }
+ }
+ },
+ "devops.StopPipeline.actions": {},
+ "devops.StopPipeline.artifactsZipFile": {},
+ "devops.StopPipeline.causeOfBlockage": {},
+ "devops.StopPipeline.causes": {
+ "properties": {
+ "_class": {
+ "description": "It’s a fully qualified name and is an identifier of the producer of this resource's capability.",
+ "type": "string"
+ },
+ "shortDescription": {
+ "description": "short description",
+ "type": "string"
+ }
+ }
+ },
+ "devops.StopPipeline.changeSet": {},
+ "devops.StopPipeline.commitUrl": {},
+ "devops.StopPipeline.description": {},
+ "devops.StopPipeline.name": {},
+ "devops.StopPipeline.pullRequest": {},
+ "devops.Validates": {
+ "properties": {
+ "credentialId": {
+ "description": "the id of credential",
+ "type": "string"
+ }
+ }
+ },
+ "errors.Error": {
+ "required": [
+ "message"
+ ],
+ "properties": {
+ "message": {
+ "description": "error message",
+ "type": "string"
+ }
+ }
+ },
+ "events.Bucket": {
+ "required": [
+ "time",
+ "count"
+ ],
+ "properties": {
+ "count": {
+ "description": "total number of events at intervals",
+ "type": "integer",
+ "format": "int64"
+ },
+ "time": {
+ "description": "timestamp",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "events.Events": {
+ "required": [
+ "total",
+ "records"
+ ],
+ "properties": {
+ "records": {
+ "description": "actual array of results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Event"
+ }
+ },
+ "total": {
+ "description": "total number of matched results",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "events.Histogram": {
+ "required": [
+ "total",
+ "buckets"
+ ],
+ "properties": {
+ "buckets": {
+ "description": "actual array of histogram results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/events.Bucket"
+ }
+ },
+ "total": {
+ "description": "total number of events",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "events.Statistics": {
+ "required": [
+ "resources",
+ "events"
+ ],
+ "properties": {
+ "events": {
+ "description": "total number of events",
+ "type": "integer",
+ "format": "int64"
+ },
+ "resources": {
+ "description": "total number of resources",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "git.AuthInfo": {
+ "required": [
+ "remoteUrl"
+ ],
+ "properties": {
+ "remoteUrl": {
+ "description": "git server url",
+ "type": "string"
+ },
+ "secretRef": {
+ "description": "auth secret reference",
+ "$ref": "#/definitions/v1.SecretReference"
+ }
+ }
+ },
+ "iam.PasswordReset": {
+ "required": [
+ "currentPassword",
+ "password"
+ ],
+ "properties": {
+ "currentPassword": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ }
+ }
+ },
+ "inf.Dec": {
+ "required": [
+ "unscaled",
+ "scale"
+ ],
+ "properties": {
+ "scale": {
+ "$ref": "#/definitions/inf.Scale"
+ },
+ "unscaled": {
+ "$ref": "#/definitions/big.Int"
+ }
+ }
+ },
+ "inf.Scale": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "logging.Bucket": {
+ "required": [
+ "time",
+ "count"
+ ],
+ "properties": {
+ "count": {
+ "description": "total number of logs at intervals",
+ "type": "integer",
+ "format": "int64"
+ },
+ "time": {
+ "description": "timestamp",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "logging.Histogram": {
+ "required": [
+ "total",
+ "histograms"
+ ],
+ "properties": {
+ "histograms": {
+ "description": "actual array of histogram results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/logging.Bucket"
+ }
+ },
+ "total": {
+ "description": "total number of logs",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "logging.Logs": {
+ "required": [
+ "total"
+ ],
+ "properties": {
+ "records": {
+ "description": "actual array of results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/logging.Record"
+ }
+ },
+ "total": {
+ "description": "total number of matched results",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "logging.Record": {
+ "properties": {
+ "container": {
+ "description": "container name",
+ "type": "string"
+ },
+ "log": {
+ "description": "log message",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "namespace",
+ "type": "string"
+ },
+ "pod": {
+ "description": "pod name",
+ "type": "string"
+ },
+ "time": {
+ "description": "log timestamp",
+ "type": "string"
+ }
+ }
+ },
+ "logging.Statistics": {
+ "required": [
+ "containers",
+ "logs"
+ ],
+ "properties": {
+ "containers": {
+ "description": "total number of containers",
+ "type": "integer",
+ "format": "int64"
+ },
+ "logs": {
+ "description": "total number of logs",
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "model.LabelValue": {},
+ "model.SamplePair": {
+ "required": [
+ "Timestamp",
+ "Value"
+ ],
+ "properties": {
+ "Timestamp": {
+ "type": "string"
+ },
+ "Value": {
+ "type": "string"
+ }
+ }
+ },
+ "model.SampleStream": {
+ "required": [
+ "metric",
+ "values"
+ ],
+ "properties": {
+ "metric": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/model.LabelValue"
+ }
+ },
+ "values": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/model.SamplePair"
+ }
+ }
+ }
+ },
+ "models.AppHealth": {
+ "required": [
+ "workloadStatuses",
+ "requests"
+ ],
+ "properties": {
+ "requests": {
+ "$ref": "#/definitions/models.RequestHealth"
+ },
+ "workloadStatuses": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/models.WorkloadStatus"
+ }
+ }
+ }
+ },
+ "models.PageableResponse": {
+ "required": [
+ "items",
+ "total_count"
+ ],
+ "properties": {
+ "items": {
+ "description": "paging data",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/models.PageableResponse.items"
+ }
+ },
+ "total_count": {
+ "description": "total count",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "models.PageableResponse.items": {},
+ "models.PodInfo": {
+ "required": [
+ "namespace",
+ "pod",
+ "container"
+ ],
+ "properties": {
+ "container": {
+ "description": "container name",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "namespace",
+ "type": "string"
+ },
+ "pod": {
+ "description": "pod name",
+ "type": "string"
+ }
+ }
+ },
+ "models.RequestHealth": {
+ "required": [
+ "inboundErrorRate",
+ "outboundErrorRate",
+ "inboundRequestRate",
+ "outboundRequestRate",
+ "errorRatio",
+ "inboundErrorRatio",
+ "outboundErrorRatio"
+ ],
+ "properties": {
+ "errorRatio": {
+ "type": "number",
+ "format": "double"
+ },
+ "inboundErrorRate": {
+ "type": "number",
+ "format": "double"
+ },
+ "inboundErrorRatio": {
+ "type": "number",
+ "format": "double"
+ },
+ "inboundRequestRate": {
+ "type": "number",
+ "format": "double"
+ },
+ "outboundErrorRate": {
+ "type": "number",
+ "format": "double"
+ },
+ "outboundErrorRatio": {
+ "type": "number",
+ "format": "double"
+ },
+ "outboundRequestRate": {
+ "type": "number",
+ "format": "double"
+ }
+ }
+ },
+ "models.WorkloadStatus": {
+ "required": [
+ "name",
+ "replicas",
+ "available"
+ ],
+ "properties": {
+ "available": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string"
+ },
+ "replicas": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "monitoring.Metadata": {
+ "required": [
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "description": "actual array of results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/monitoring.Metadata"
+ }
+ }
+ }
+ },
+ "monitoring.Metric": {
+ "properties": {
+ "data": {
+ "description": "actual metric result",
+ "$ref": "#/definitions/monitoring.MetricData"
+ },
+ "error": {
+ "type": "string"
+ },
+ "metric_name": {
+ "description": "metric name, e.g. scheduler_up_sum",
+ "type": "string"
+ }
+ }
+ },
+ "monitoring.MetricData": {
+ "properties": {
+ "result": {
+ "description": "metric data including labels, time series and values",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/monitoring.MetricValue"
+ }
+ },
+ "resultType": {
+ "description": "result type, one of matrix, vector",
+ "type": "string"
+ }
+ }
+ },
+ "monitoring.MetricLabelSet": {
+ "required": [
+ "data"
+ ],
+ "properties": {
+ "data": {
+ "description": "actual array of results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/monitoring.MetricLabelSet.data"
+ }
+ }
+ }
+ },
+ "monitoring.MetricLabelSet.data": {},
+ "monitoring.MetricValue": {
+ "properties": {
+ "metric": {
+ "description": "time series labels",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "value": {
+ "description": "time series, values of vector type",
+ "type": "string"
+ },
+ "values": {
+ "description": "time series, values of matrix type",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/monitoring.Point"
+ }
+ }
+ }
+ },
+ "monitoring.Metrics": {
+ "required": [
+ "results"
+ ],
+ "properties": {
+ "page": {
+ "description": "current page returned",
+ "type": "integer",
+ "format": "int32"
+ },
+ "results": {
+ "description": "actual array of results",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/monitoring.Metric"
+ }
+ },
+ "total_item": {
+ "description": "page size",
+ "type": "integer",
+ "format": "int32"
+ },
+ "total_page": {
+ "description": "total number of pages",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "monitoring.Point": {},
+ "openpitrix.App": {
+ "required": [
+ "category_set"
+ ],
+ "properties": {
+ "abstraction": {
+ "type": "string"
+ },
+ "active": {
+ "type": "boolean"
+ },
+ "app_id": {
+ "type": "string"
+ },
+ "app_version_types": {
+ "type": "string"
+ },
+ "category_set": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/openpitrix.ResourceCategory"
+ }
+ },
+ "chart_name": {
+ "type": "string"
+ },
+ "cluster_total": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "company_join_time": {
+ "type": "string"
+ },
+ "company_name": {
+ "type": "string"
+ },
+ "company_profile": {
+ "type": "string"
+ },
+ "company_website": {
+ "type": "string"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "home": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "isv": {
+ "type": "string"
+ },
+ "keywords": {
+ "type": "string"
+ },
+ "latest_app_version": {
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ },
+ "maintainers": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "readme": {
+ "type": "string"
+ },
+ "repo_id": {
+ "type": "string"
+ },
+ "screenshots": {
+ "type": "string"
+ },
+ "sources": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "tos": {
+ "type": "string"
+ },
+ "update_time": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.AppVersion": {
+ "properties": {
+ "active": {
+ "type": "boolean"
+ },
+ "app_id": {
+ "type": "string"
+ },
+ "cluster_total": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "home": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "keywords": {
+ "type": "string"
+ },
+ "maintainers": {
+ "type": "string"
+ },
+ "message": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "package_name": {
+ "type": "string"
+ },
+ "readme": {
+ "type": "string"
+ },
+ "review_id": {
+ "type": "string"
+ },
+ "screenshots": {
+ "type": "string"
+ },
+ "sequence": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "sources": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ },
+ "update_time": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.AppVersionAudit": {
+ "properties": {
+ "app_id": {
+ "type": "string"
+ },
+ "app_name": {
+ "type": "string"
+ },
+ "message": {
+ "type": "string"
+ },
+ "operator": {
+ "type": "string"
+ },
+ "operator_type": {
+ "type": "string"
+ },
+ "review_id": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ },
+ "version_name": {
+ "type": "string"
+ },
+ "version_type": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.AppVersionReview": {
+ "properties": {
+ "app_id": {
+ "type": "string"
+ },
+ "app_name": {
+ "type": "string"
+ },
+ "phase": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/openpitrix.AppVersionReviewPhase"
+ }
+ },
+ "review_id": {
+ "type": "string"
+ },
+ "reviewer": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ },
+ "version_name": {
+ "type": "string"
+ },
+ "version_type": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.AppVersionReviewPhase": {
+ "properties": {
+ "message": {
+ "type": "string"
+ },
+ "operator": {
+ "type": "string"
+ },
+ "operator_type": {
+ "type": "string"
+ },
+ "review_time": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.Application": {
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "app": {
+ "description": "application template info",
+ "$ref": "#/definitions/openpitrix.App"
+ },
+ "cluster": {
+ "description": "application cluster info",
+ "$ref": "#/definitions/openpitrix.Cluster"
+ },
+ "ingresses": {
+ "description": "application ingresses",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.Ingress"
+ }
+ },
+ "name": {
+ "description": "application name",
+ "type": "string"
+ },
+ "services": {
+ "description": "application services",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Service"
+ }
+ },
+ "version": {
+ "description": "application template version info",
+ "$ref": "#/definitions/openpitrix.AppVersion"
+ },
+ "workloads": {
+ "description": "application workloads",
+ "$ref": "#/definitions/openpitrix.workLoads"
+ }
+ }
+ },
+ "openpitrix.Attachment": {
+ "properties": {
+ "attachment_content": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/strfmt.Base64"
+ }
+ },
+ "attachment_id": {
+ "type": "string"
+ },
+ "create_time": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.Category": {
+ "properties": {
+ "app_total": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "category_id": {
+ "type": "string"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "locale": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "update_time": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.Cluster": {
+ "properties": {
+ "additional_info": {
+ "type": "string"
+ },
+ "app_id": {
+ "type": "string"
+ },
+ "cluster_id": {
+ "type": "string"
+ },
+ "cluster_type": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "debug": {
+ "type": "boolean"
+ },
+ "description": {
+ "type": "string"
+ },
+ "endpoints": {
+ "type": "string"
+ },
+ "env": {
+ "type": "string"
+ },
+ "frontgate_id": {
+ "type": "string"
+ },
+ "global_uuid": {
+ "type": "string"
+ },
+ "metadata_root_access": {
+ "type": "boolean"
+ },
+ "name": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "runtime_id": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "subnet_id": {
+ "type": "string"
+ },
+ "transition_status": {
+ "type": "string"
+ },
+ "upgrade_status": {
+ "type": "string"
+ },
+ "upgrade_time": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ },
+ "vpc_id": {
+ "type": "string"
+ },
+ "zone": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateAppRequest": {
+ "properties": {
+ "icon": {
+ "type": "string"
+ },
+ "isv": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "version_name": {
+ "type": "string"
+ },
+ "version_package": {
+ "type": "string"
+ },
+ "version_type": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateAppResponse": {
+ "properties": {
+ "app_id": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateAppVersionRequest": {
+ "properties": {
+ "app_id": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "package": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateAppVersionResponse": {
+ "properties": {
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateCategoryRequest": {
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "locale": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateCategoryResponse": {
+ "properties": {
+ "category_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateClusterRequest": {
+ "required": [
+ "advanced_param"
+ ],
+ "properties": {
+ "advanced_param": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "app_id": {
+ "type": "string"
+ },
+ "conf": {
+ "type": "string"
+ },
+ "runtime_id": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateRepoRequest": {
+ "required": [
+ "providers"
+ ],
+ "properties": {
+ "app_default_status": {
+ "type": "string"
+ },
+ "category_id": {
+ "type": "string"
+ },
+ "credential": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "providers": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "type": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ },
+ "visibility": {
+ "type": "string"
+ },
+ "workspace": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.CreateRepoResponse": {
+ "properties": {
+ "repo_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.GetAppVersionPackageFilesResponse": {
+ "properties": {
+ "files": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/strfmt.Base64"
+ }
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.GetAppVersionPackageResponse": {
+ "properties": {
+ "app_id": {
+ "type": "string"
+ },
+ "package": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.ModifyAppVersionRequest": {
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "package": {
+ "type": "string"
+ },
+ "package_files": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/openpitrix.ModifyAppVersionRequest.package_files"
+ }
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.ModifyAppVersionRequest.package_files": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "integer",
+ "format": "int8"
+ }
+ }
+ },
+ "openpitrix.ModifyCategoryRequest": {
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "locale": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.ModifyClusterAttributesRequest": {
+ "properties": {
+ "cluster_id": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.ModifyRepoRequest": {
+ "required": [
+ "providers"
+ ],
+ "properties": {
+ "app_default_status": {
+ "type": "string"
+ },
+ "category_id": {
+ "type": "string"
+ },
+ "credential": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "providers": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "type": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ },
+ "visibility": {
+ "type": "string"
+ },
+ "workspace": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.Repo": {
+ "required": [
+ "category_set",
+ "labels",
+ "providers",
+ "selectors"
+ ],
+ "properties": {
+ "app_default_status": {
+ "type": "string"
+ },
+ "category_set": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/openpitrix.ResourceCategory"
+ }
+ },
+ "controller": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "credential": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "labels": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/openpitrix.RepoLabel"
+ }
+ },
+ "name": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "providers": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "repo_id": {
+ "type": "string"
+ },
+ "selectors": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/openpitrix.RepoSelector"
+ }
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ },
+ "visibility": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.RepoActionRequest": {
+ "required": [
+ "action"
+ ],
+ "properties": {
+ "action": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.RepoLabel": {
+ "properties": {
+ "create_time": {
+ "type": "string"
+ },
+ "label_key": {
+ "type": "string"
+ },
+ "label_value": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.RepoSelector": {
+ "properties": {
+ "create_time": {
+ "type": "string"
+ },
+ "selector_key": {
+ "type": "string"
+ },
+ "selector_value": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.ResourceCategory": {
+ "properties": {
+ "category_id": {
+ "type": "string"
+ },
+ "create_time": {
+ "type": "string"
+ },
+ "locale": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "status_time": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.UpgradeClusterRequest": {
+ "required": [
+ "cluster_id",
+ "advanced_param"
+ ],
+ "properties": {
+ "advanced_param": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "cluster_id": {
+ "type": "string"
+ },
+ "conf": {
+ "type": "string"
+ },
+ "runtime_id": {
+ "type": "string"
+ },
+ "version_id": {
+ "type": "string"
+ }
+ }
+ },
+ "openpitrix.workLoads": {
+ "properties": {
+ "daemonsets": {
+ "description": "daemonset list",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.DaemonSet"
+ }
+ },
+ "deployments": {
+ "description": "deployment list",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Deployment"
+ }
+ },
+ "statefulsets": {
+ "description": "statefulset list",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.StatefulSet"
+ }
+ }
+ }
+ },
+ "prometheus.Metric": {
+ "required": [
+ "matrix",
+ "err"
+ ],
+ "properties": {
+ "matrix": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/model.SampleStream"
+ }
+ }
+ }
+ },
+ "prometheus.Metrics.histograms": {},
+ "registries.Config": {
+ "properties": {
+ "ArgsEscaped": {
+ "description": "Command is already escaped (Windows only)",
+ "type": "boolean"
+ },
+ "AttachStderr": {
+ "description": "Boolean value, attaches to stderr.",
+ "type": "boolean"
+ },
+ "AttachStdin": {
+ "description": "Boolean value, attaches to stdin.",
+ "type": "boolean"
+ },
+ "AttachStdout": {
+ "description": "Boolean value, attaches to stdout.",
+ "type": "boolean"
+ },
+ "Cmd": {
+ "description": "Command to run specified as a string or an array of strings.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "Domainname": {
+ "description": "A string value containing the domain name to use for the container.",
+ "type": "string"
+ },
+ "Entrypoint": {
+ "description": "The entry point set for the container as a string or an array of strings.",
+ "$ref": "#/definitions/registries.Config.Entrypoint"
+ },
+ "Env": {
+ "description": "A list of environment variables in the form of [\"VAR=value\", ...]",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ExposedPorts": {
+ "description": "An object mapping ports to an empty object in the form of: \"ExposedPorts\": { \"\u003cport\u003e/\u003ctcp|udp\u003e: {}\" }",
+ "type": "object"
+ },
+ "Hostname": {
+ "description": "A string value containing the hostname to use for the container.",
+ "type": "string"
+ },
+ "Image": {
+ "description": "A string specifying the image name to use for the container.",
+ "type": "string"
+ },
+ "Labels": {
+ "description": "The map of labels to a container.",
+ "$ref": "#/definitions/registries.Labels"
+ },
+ "OnBuild": {
+ "description": "ONBUILD metadata that were defined in the image's Dockerfile.",
+ "$ref": "#/definitions/registries.Config.OnBuild"
+ },
+ "OpenStdin": {
+ "description": "Boolean value, opens stdin",
+ "type": "boolean"
+ },
+ "StdinOnce": {
+ "description": "Boolean value, close stdin after the 1 attached client disconnects.",
+ "type": "boolean"
+ },
+ "StopSignal": {
+ "description": "Signal to stop a container as a string or unsigned integer.",
+ "type": "string"
+ },
+ "Tty": {
+ "description": "Boolean value, Attach standard streams to a tty, including stdin if it is not closed.",
+ "type": "boolean"
+ },
+ "User": {
+ "description": "A string value specifying the user inside the container.",
+ "type": "string"
+ },
+ "Volumes": {
+ "description": "An object mapping mount point paths (strings) inside the container to empty objects.",
+ "$ref": "#/definitions/registries.Config.Volumes"
+ },
+ "WorkingDir": {
+ "description": "A string specifying the working directory for commands to run in.",
+ "type": "string"
+ }
+ }
+ },
+ "registries.Config.Entrypoint": {},
+ "registries.Config.OnBuild": {},
+ "registries.Config.Volumes": {},
+ "registries.ContainerConfig": {
+ "properties": {
+ "ArgsEscaped": {
+ "description": "Command is already escaped (Windows only)",
+ "type": "boolean"
+ },
+ "AttachStderr": {
+ "description": "Boolean value, attaches to stderr.",
+ "type": "boolean"
+ },
+ "AttachStdin": {
+ "description": "Boolean value, attaches to stdin.",
+ "type": "boolean"
+ },
+ "AttachStdout": {
+ "description": "Boolean value, attaches to stdout.",
+ "type": "boolean"
+ },
+ "Cmd": {
+ "description": "Command to run specified as a string or an array of strings.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "Domainname": {
+ "description": "A string value containing the domain name to use for the container.",
+ "type": "string"
+ },
+ "Entrypoint": {
+ "description": "The entry point set for the container as a string or an array of strings.",
+ "$ref": "#/definitions/registries.ContainerConfig.Entrypoint"
+ },
+ "Env": {
+ "description": "A list of environment variables in the form of [\"VAR=value\", ...]",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ExposedPorts": {
+ "description": "An object mapping ports to an empty object in the form of: \"ExposedPorts\": { \"\u003cport\u003e/\u003ctcp|udp\u003e: {}\" }",
+ "type": "object"
+ },
+ "Hostname": {
+ "description": "A string value containing the hostname to use for the container.",
+ "type": "string"
+ },
+ "Image": {
+ "description": "A string specifying the image name to use for the container.",
+ "type": "string"
+ },
+ "Labels": {
+ "description": "The map of labels to a container.",
+ "$ref": "#/definitions/registries.Labels"
+ },
+ "OnBuild": {
+ "description": "ONBUILD metadata that were defined in the image's Dockerfile.",
+ "$ref": "#/definitions/registries.ContainerConfig.OnBuild"
+ },
+ "OpenStdin": {
+ "description": "Boolean value, opens stdin",
+ "type": "boolean"
+ },
+ "StdinOnce": {
+ "description": "Boolean value, close stdin after the 1 attached client disconnects.",
+ "type": "boolean"
+ },
+ "StopSignal": {
+ "description": "Signal to stop a container as a string or unsigned integer.",
+ "type": "string"
+ },
+ "Tty": {
+ "description": "Boolean value, Attach standard streams to a tty, including stdin if it is not closed.",
+ "type": "boolean"
+ },
+ "User": {
+ "description": "A string value specifying the user inside the container.",
+ "type": "string"
+ },
+ "Volumes": {
+ "description": "An object mapping mount point paths (strings) inside the container to empty objects.",
+ "$ref": "#/definitions/registries.ContainerConfig.Volumes"
+ },
+ "WorkingDir": {
+ "description": "A string specifying the working directory for commands to run in.",
+ "type": "string"
+ }
+ }
+ },
+ "registries.ContainerConfig.Entrypoint": {},
+ "registries.ContainerConfig.OnBuild": {},
+ "registries.ContainerConfig.Volumes": {},
+ "registries.History": {
+ "properties": {
+ "created": {
+ "description": "Created time.",
+ "type": "string",
+ "format": "date-time"
+ },
+ "created_by": {
+ "description": "Created command.",
+ "type": "string"
+ },
+ "empty_layer": {
+ "description": "Layer empty or not.",
+ "type": "boolean"
+ }
+ }
+ },
+ "registries.ImageBlob": {
+ "required": [
+ "rootfs omitempty"
+ ],
+ "properties": {
+ "architecture": {
+ "description": "The architecture field specifies the CPU architecture, for example amd64 or ppc64le.",
+ "type": "string"
+ },
+ "config": {
+ "description": "The config field references a configuration object for a container.",
+ "$ref": "#/definitions/registries.Config"
+ },
+ "container": {
+ "description": "Container id.",
+ "type": "string"
+ },
+ "container_config": {
+ "description": "The config data of container.",
+ "$ref": "#/definitions/registries.ContainerConfig"
+ },
+ "created": {
+ "description": "Create time.",
+ "type": "string",
+ "format": "date-time"
+ },
+ "docker_version": {
+ "description": "docker version.",
+ "type": "string"
+ },
+ "history": {
+ "description": "The data of history update.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/registries.History"
+ }
+ },
+ "os": {
+ "description": "Operating system.",
+ "type": "string"
+ },
+ "rootfs omitempty": {
+ "description": "Root filesystem.",
+ "$ref": "#/definitions/registries.Rootfs"
+ }
+ }
+ },
+ "registries.ImageDetails": {
+ "properties": {
+ "imageBlob": {
+ "description": "Retrieve the blob from the registry identified. Reference: https://docs.docker.com/registry/spec/api/#blob",
+ "$ref": "#/definitions/registries.ImageBlob"
+ },
+ "imageManifest": {
+ "description": "Retrieve the manifest from the registry identified. Reference: https://docs.docker.com/registry/spec/api/#manifest",
+ "$ref": "#/definitions/registries.ImageManifest"
+ },
+ "imageTag": {
+ "description": "image tag.",
+ "type": "string"
+ },
+ "message": {
+ "description": "Status message.",
+ "type": "string"
+ },
+ "registry": {
+ "description": "registry domain.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status is the status of the image search, such as \"succeeded\".",
+ "type": "string"
+ }
+ }
+ },
+ "registries.ImageManifest": {
+ "properties": {
+ "config": {
+ "description": "The config field references a configuration object for a container.",
+ "$ref": "#/definitions/registries.ManifestConfig"
+ },
+ "layers": {
+ "description": "Fields of an item in the layers list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/registries.Layers"
+ }
+ },
+ "mediaType": {
+ "description": "The MIME type of the manifest.",
+ "type": "string"
+ },
+ "schemaVersion": {
+ "description": "This field specifies the image manifest schema version as an integer.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "registries.Labels": {
+ "required": [
+ "maintainer"
+ ],
+ "properties": {
+ "maintainer": {
+ "type": "string"
+ }
+ }
+ },
+ "registries.Layers": {
+ "properties": {
+ "digest": {
+ "description": "The digest of the content, as defined by the Registry V2 HTTP API Specificiation. Reference https://docs.docker.com/registry/spec/api/#digest-parameter",
+ "type": "string"
+ },
+ "mediaType": {
+ "description": "The MIME type of the layer.",
+ "type": "string"
+ },
+ "size": {
+ "description": "The size in bytes of the layer.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "registries.ManifestConfig": {
+ "properties": {
+ "digest": {
+ "description": "The digest of the content, as defined by the Registry V2 HTTP API Specificiation. Reference https://docs.docker.com/registry/spec/api/#digest-parameter",
+ "type": "string"
+ },
+ "mediaType": {
+ "description": "The MIME type of the image.",
+ "type": "string"
+ },
+ "size": {
+ "description": "The size in bytes of the image.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "registries.Rootfs": {
+ "properties": {
+ "diff_ids": {
+ "description": "Contain ids of layer list",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "type": {
+ "description": "Root filesystem type, always \"layers\" ",
+ "type": "string"
+ }
+ }
+ },
+ "resource.Quantity": {
+ "required": [
+ "i",
+ "d",
+ "s",
+ "Format"
+ ],
+ "properties": {
+ "Format": {
+ "type": "string"
+ },
+ "d": {
+ "$ref": "#/definitions/resource.infDecAmount"
+ },
+ "i": {
+ "$ref": "#/definitions/resource.int64Amount"
+ },
+ "s": {
+ "type": "string"
+ }
+ }
+ },
+ "resource.Scale": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "resource.infDecAmount": {
+ "required": [
+ "Dec"
+ ],
+ "properties": {
+ "Dec": {
+ "$ref": "#/definitions/inf.Dec"
+ }
+ }
+ },
+ "resource.int64Amount": {
+ "required": [
+ "value",
+ "scale"
+ ],
+ "properties": {
+ "scale": {
+ "$ref": "#/definitions/resource.Scale"
+ },
+ "value": {
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "strfmt.Base64": {
+ "type": "string"
+ },
+ "v1.AWSElasticBlockStoreVolumeSource": {
+ "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
+ "required": [
+ "volumeID"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "type": "string"
+ },
+ "partition": {
+ "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).",
+ "type": "integer",
+ "format": "int32"
+ },
+ "readOnly": {
+ "description": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "type": "boolean"
+ },
+ "volumeID": {
+ "description": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Affinity": {
+ "description": "Affinity is a group of affinity scheduling rules.",
+ "properties": {
+ "nodeAffinity": {
+ "description": "Describes node affinity scheduling rules for the pod.",
+ "$ref": "#/definitions/v1.NodeAffinity"
+ },
+ "podAffinity": {
+ "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
+ "$ref": "#/definitions/v1.PodAffinity"
+ },
+ "podAntiAffinity": {
+ "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
+ "$ref": "#/definitions/v1.PodAntiAffinity"
+ }
+ }
+ },
+ "v1.AggregationRule": {
+ "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
+ "properties": {
+ "clusterRoleSelectors": {
+ "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.LabelSelector"
+ }
+ }
+ }
+ },
+ "v1.AzureDataDiskCachingMode": {},
+ "v1.AzureDataDiskKind": {},
+ "v1.AzureDiskVolumeSource": {
+ "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ "required": [
+ "diskName",
+ "diskURI"
+ ],
+ "properties": {
+ "cachingMode": {
+ "description": "Host Caching mode: None, Read Only, Read Write.",
+ "$ref": "#/definitions/v1.AzureDataDiskCachingMode"
+ },
+ "diskName": {
+ "description": "The Name of the data disk in the blob storage",
+ "type": "string"
+ },
+ "diskURI": {
+ "description": "The URI the data disk in the blob storage",
+ "type": "string"
+ },
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
+ "$ref": "#/definitions/v1.AzureDataDiskKind"
+ },
+ "readOnly": {
+ "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.AzureFileVolumeSource": {
+ "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "required": [
+ "secretName",
+ "shareName"
+ ],
+ "properties": {
+ "readOnly": {
+ "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "secretName": {
+ "description": "the name of secret that contains Azure Storage Account Name and Key",
+ "type": "string"
+ },
+ "shareName": {
+ "description": "Share Name",
+ "type": "string"
+ }
+ }
+ },
+ "v1.CSIVolumeSource": {
+ "description": "Represents a source location of a volume to mount, managed by an external CSI driver",
+ "required": [
+ "driver"
+ ],
+ "properties": {
+ "driver": {
+ "description": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.",
+ "type": "string"
+ },
+ "fsType": {
+ "description": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.",
+ "type": "string"
+ },
+ "nodePublishSecretRef": {
+ "description": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "readOnly": {
+ "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).",
+ "type": "boolean"
+ },
+ "volumeAttributes": {
+ "description": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.Capabilities": {
+ "description": "Adds and removes POSIX capabilities from running containers.",
+ "properties": {
+ "add": {
+ "description": "Added capabilities",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Capability"
+ }
+ },
+ "drop": {
+ "description": "Removed capabilities",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Capability"
+ }
+ }
+ }
+ },
+ "v1.Capability": {},
+ "v1.CephFSVolumeSource": {
+ "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
+ "required": [
+ "monitors"
+ ],
+ "properties": {
+ "monitors": {
+ "description": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "path": {
+ "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+ "type": "boolean"
+ },
+ "secretFile": {
+ "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+ "type": "string"
+ },
+ "secretRef": {
+ "description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "user": {
+ "description": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+ "type": "string"
+ }
+ }
+ },
+ "v1.CinderVolumeSource": {
+ "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+ "required": [
+ "volumeID"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "Optional: points to a secret object containing parameters used to connect to OpenStack.",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "volumeID": {
+ "description": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ClientIPConfig": {
+ "description": "ClientIPConfig represents the configurations of Client IP based session affinity.",
+ "properties": {
+ "timeoutSeconds": {
+ "description": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be \u003e0 \u0026\u0026 \u003c=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.ClusterRole": {
+ "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.",
+ "required": [
+ "rules"
+ ],
+ "properties": {
+ "aggregationRule": {
+ "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+ "$ref": "#/definitions/v1.AggregationRule"
+ },
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata.",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "rules": {
+ "description": "Rules holds all the PolicyRules for this ClusterRole",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PolicyRule"
+ }
+ }
+ }
+ },
+ "v1.ConfigMapEnvSource": {
+ "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
+ "properties": {
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the ConfigMap must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.ConfigMapKeySelector": {
+ "description": "Selects a key from a ConfigMap.",
+ "required": [
+ "key"
+ ],
+ "properties": {
+ "key": {
+ "description": "The key to select.",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the ConfigMap or its key must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.ConfigMapProjection": {
+ "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
+ "properties": {
+ "items": {
+ "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.KeyToPath"
+ }
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the ConfigMap or its keys must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.ConfigMapVolumeSource": {
+ "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
+ "properties": {
+ "defaultMode": {
+ "description": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "items": {
+ "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.KeyToPath"
+ }
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the ConfigMap or its keys must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.Container": {
+ "description": "A single application container that you want to run within a pod.",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "args": {
+ "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "command": {
+ "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "env": {
+ "description": "List of environment variables to set in the container. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.EnvVar"
+ }
+ },
+ "envFrom": {
+ "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.EnvFromSource"
+ }
+ },
+ "image": {
+ "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+ "type": "string"
+ },
+ "imagePullPolicy": {
+ "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+ "type": "string"
+ },
+ "lifecycle": {
+ "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ "$ref": "#/definitions/v1.Lifecycle"
+ },
+ "livenessProbe": {
+ "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "name": {
+ "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
+ "type": "string"
+ },
+ "ports": {
+ "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.ContainerPort"
+ }
+ },
+ "readinessProbe": {
+ "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "resources": {
+ "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ "$ref": "#/definitions/v1.ResourceRequirements"
+ },
+ "securityContext": {
+ "description": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ "$ref": "#/definitions/v1.SecurityContext"
+ },
+ "startupProbe": {
+ "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "stdin": {
+ "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+ "type": "boolean"
+ },
+ "stdinOnce": {
+        "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false",
+ "type": "boolean"
+ },
+ "terminationMessagePath": {
+ "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+ "type": "string"
+ },
+ "terminationMessagePolicy": {
+ "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+ "type": "string"
+ },
+ "tty": {
+ "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+ "type": "boolean"
+ },
+ "volumeDevices": {
+ "description": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.VolumeDevice"
+ }
+ },
+ "volumeMounts": {
+ "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.VolumeMount"
+ }
+ },
+ "workingDir": {
+ "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ContainerPort": {
+ "description": "ContainerPort represents a network port in a single container.",
+ "required": [
+ "containerPort"
+ ],
+ "properties": {
+ "containerPort": {
+ "description": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 \u003c x \u003c 65536.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "hostIP": {
+ "description": "What host IP to bind the external port to.",
+ "type": "string"
+ },
+ "hostPort": {
+ "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "name": {
+ "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
+ "type": "string"
+ },
+ "protocol": {
+ "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
+ "type": "string"
+ }
+ }
+ },
+ "v1.DaemonSet": {
+ "description": "DaemonSet represents the configuration of a daemon set.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.DaemonSetSpec"
+ },
+ "status": {
+ "description": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.DaemonSetStatus"
+ }
+ }
+ },
+ "v1.DaemonSetCondition": {
+ "description": "DaemonSetCondition describes the state of a DaemonSet at a certain point.",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human readable message indicating details about the transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "The reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of DaemonSet condition.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.DaemonSetSpec": {
+ "description": "DaemonSetSpec is the specification of a daemon set.",
+ "required": [
+ "selector",
+ "template"
+ ],
+ "properties": {
+ "minReadySeconds": {
+ "description": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).",
+ "type": "integer",
+ "format": "int32"
+ },
+ "revisionHistoryLimit": {
+        "description": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "selector": {
+ "description": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "template": {
+ "description": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "$ref": "#/definitions/v1.PodTemplateSpec"
+ },
+ "updateStrategy": {
+ "description": "An update strategy to replace existing DaemonSet pods with new pods.",
+ "$ref": "#/definitions/v1.DaemonSetUpdateStrategy"
+ }
+ }
+ },
+ "v1.DaemonSetStatus": {
+ "description": "DaemonSetStatus represents the current status of a daemon set.",
+ "required": [
+ "currentNumberScheduled",
+ "numberMisscheduled",
+ "desiredNumberScheduled",
+ "numberReady"
+ ],
+ "properties": {
+ "collisionCount": {
+ "description": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "conditions": {
+ "description": "Represents the latest available observations of a DaemonSet's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.DaemonSetCondition"
+ }
+ },
+ "currentNumberScheduled": {
+ "description": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "type": "integer",
+ "format": "int32"
+ },
+ "desiredNumberScheduled": {
+ "description": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberAvailable": {
+ "description": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberMisscheduled": {
+ "description": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberReady": {
+ "description": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "numberUnavailable": {
+ "description": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)",
+ "type": "integer",
+ "format": "int32"
+ },
+ "observedGeneration": {
+ "description": "The most recent generation observed by the daemon set controller.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "updatedNumberScheduled": {
+        "description": "The total number of nodes that are running the updated daemon pod.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.DaemonSetUpdateStrategy": {
+ "description": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.",
+ "properties": {
+ "rollingUpdate": {
+ "description": "Rolling update config params. Present only if type = \"RollingUpdate\".",
+ "$ref": "#/definitions/v1.RollingUpdateDaemonSet"
+ },
+ "type": {
+ "description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Deployment": {
+ "description": "Deployment enables declarative updates for Pods and ReplicaSets.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object metadata.",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of the Deployment.",
+ "$ref": "#/definitions/v1.DeploymentSpec"
+ },
+ "status": {
+ "description": "Most recently observed status of the Deployment.",
+ "$ref": "#/definitions/v1.DeploymentStatus"
+ }
+ }
+ },
+ "v1.DeploymentCondition": {
+ "description": "DeploymentCondition describes the state of a deployment at a certain point.",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "type": "string"
+ },
+ "lastUpdateTime": {
+ "description": "The last time this condition was updated.",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human readable message indicating details about the transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "The reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of deployment condition.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.DeploymentSpec": {
+ "description": "DeploymentSpec is the specification of the desired behavior of the Deployment.",
+ "required": [
+ "selector",
+ "template"
+ ],
+ "properties": {
+ "minReadySeconds": {
+ "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "type": "integer",
+ "format": "int32"
+ },
+ "paused": {
+ "description": "Indicates that the deployment is paused.",
+ "type": "boolean"
+ },
+ "progressDeadlineSeconds": {
+ "description": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "replicas": {
+ "description": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "revisionHistoryLimit": {
+ "description": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "selector": {
+ "description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "strategy": {
+ "description": "The deployment strategy to use to replace existing pods with new ones.",
+ "$ref": "#/definitions/v1.DeploymentStrategy"
+ },
+ "template": {
+ "description": "Template describes the pods that will be created.",
+ "$ref": "#/definitions/v1.PodTemplateSpec"
+ }
+ }
+ },
+ "v1.DeploymentStatus": {
+ "description": "DeploymentStatus is the most recently observed status of the Deployment.",
+ "properties": {
+ "availableReplicas": {
+ "description": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "collisionCount": {
+ "description": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "conditions": {
+ "description": "Represents the latest available observations of a deployment's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.DeploymentCondition"
+ }
+ },
+ "observedGeneration": {
+ "description": "The generation observed by the deployment controller.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "readyReplicas": {
+ "description": "Total number of ready pods targeted by this deployment.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "replicas": {
+ "description": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).",
+ "type": "integer",
+ "format": "int32"
+ },
+ "unavailableReplicas": {
+ "description": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "updatedReplicas": {
+ "description": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.DeploymentStrategy": {
+ "description": "DeploymentStrategy describes how to replace existing pods with new ones.",
+ "properties": {
+ "rollingUpdate": {
+ "description": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.",
+ "$ref": "#/definitions/v1.RollingUpdateDeployment"
+ },
+ "type": {
+ "description": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.DownwardAPIProjection": {
+ "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
+ "properties": {
+ "items": {
+ "description": "Items is a list of DownwardAPIVolume file",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.DownwardAPIVolumeFile"
+ }
+ }
+ }
+ },
+ "v1.DownwardAPIVolumeFile": {
+ "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field",
+ "required": [
+ "path"
+ ],
+ "properties": {
+ "fieldRef": {
+ "description": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
+ "$ref": "#/definitions/v1.ObjectFieldSelector"
+ },
+ "mode": {
+ "description": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "path": {
+ "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
+ "type": "string"
+ },
+ "resourceFieldRef": {
+ "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
+ "$ref": "#/definitions/v1.ResourceFieldSelector"
+ }
+ }
+ },
+ "v1.DownwardAPIVolumeSource": {
+ "description": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
+ "properties": {
+ "defaultMode": {
+ "description": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "items": {
+ "description": "Items is a list of downward API volume file",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.DownwardAPIVolumeFile"
+ }
+ }
+ }
+ },
+ "v1.EmptyDirVolumeSource": {
+ "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
+ "properties": {
+ "medium": {
+ "description": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ "type": "string"
+ },
+ "sizeLimit": {
+ "description": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir",
+ "type": "string"
+ }
+ }
+ },
+ "v1.EnvFromSource": {
+ "description": "EnvFromSource represents the source of a set of ConfigMaps",
+ "properties": {
+ "configMapRef": {
+ "description": "The ConfigMap to select from",
+ "$ref": "#/definitions/v1.ConfigMapEnvSource"
+ },
+ "prefix": {
+ "description": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
+ "type": "string"
+ },
+ "secretRef": {
+ "description": "The Secret to select from",
+ "$ref": "#/definitions/v1.SecretEnvSource"
+ }
+ }
+ },
+ "v1.EnvVar": {
+ "description": "EnvVar represents an environment variable present in a Container.",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of the environment variable. Must be a C_IDENTIFIER.",
+ "type": "string"
+ },
+ "value": {
+ "description": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+ "type": "string"
+ },
+ "valueFrom": {
+ "description": "Source for the environment variable's value. Cannot be used if value is not empty.",
+ "$ref": "#/definitions/v1.EnvVarSource"
+ }
+ }
+ },
+ "v1.EnvVarSource": {
+ "description": "EnvVarSource represents a source for the value of an EnvVar.",
+ "properties": {
+ "configMapKeyRef": {
+ "description": "Selects a key of a ConfigMap.",
+ "$ref": "#/definitions/v1.ConfigMapKeySelector"
+ },
+ "fieldRef": {
+ "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
+ "$ref": "#/definitions/v1.ObjectFieldSelector"
+ },
+ "resourceFieldRef": {
+ "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
+ "$ref": "#/definitions/v1.ResourceFieldSelector"
+ },
+ "secretKeyRef": {
+ "description": "Selects a key of a secret in the pod's namespace",
+ "$ref": "#/definitions/v1.SecretKeySelector"
+ }
+ }
+ },
+ "v1.EphemeralContainer": {
+ "description": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "args": {
+ "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "command": {
+ "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "env": {
+ "description": "List of environment variables to set in the container. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.EnvVar"
+ }
+ },
+ "envFrom": {
+ "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.EnvFromSource"
+ }
+ },
+ "image": {
+ "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images",
+ "type": "string"
+ },
+ "imagePullPolicy": {
+ "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+ "type": "string"
+ },
+ "lifecycle": {
+ "description": "Lifecycle is not allowed for ephemeral containers.",
+ "$ref": "#/definitions/v1.Lifecycle"
+ },
+ "livenessProbe": {
+ "description": "Probes are not allowed for ephemeral containers.",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "name": {
+ "description": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.",
+ "type": "string"
+ },
+ "ports": {
+ "description": "Ports are not allowed for ephemeral containers.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.ContainerPort"
+ }
+ },
+ "readinessProbe": {
+ "description": "Probes are not allowed for ephemeral containers.",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "resources": {
+ "description": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
+ "$ref": "#/definitions/v1.ResourceRequirements"
+ },
+ "securityContext": {
+ "description": "SecurityContext is not allowed for ephemeral containers.",
+ "$ref": "#/definitions/v1.SecurityContext"
+ },
+ "startupProbe": {
+ "description": "Probes are not allowed for ephemeral containers.",
+ "$ref": "#/definitions/v1.Probe"
+ },
+ "stdin": {
+ "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+ "type": "boolean"
+ },
+ "stdinOnce": {
+ "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false",
+ "type": "boolean"
+ },
+ "targetContainerName": {
+ "description": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.",
+ "type": "string"
+ },
+ "terminationMessagePath": {
+ "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+ "type": "string"
+ },
+ "terminationMessagePolicy": {
+ "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+ "type": "string"
+ },
+ "tty": {
+ "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+ "type": "boolean"
+ },
+ "volumeDevices": {
+ "description": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.VolumeDevice"
+ }
+ },
+ "volumeMounts": {
+ "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.VolumeMount"
+ }
+ },
+ "workingDir": {
+ "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Event": {
+ "description": "Event is a report of an event somewhere in the cluster.",
+ "required": [
+ "metadata",
+ "involvedObject",
+ "reportingComponent",
+ "reportingInstance"
+ ],
+ "properties": {
+ "action": {
+ "description": "What action was taken/failed regarding to the Regarding object.",
+ "type": "string"
+ },
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "count": {
+ "description": "The number of times this event has occurred.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "eventTime": {
+ "description": "Time when this Event was first observed.",
+ "type": "string"
+ },
+ "firstTimestamp": {
+ "description": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
+ "type": "string"
+ },
+ "involvedObject": {
+ "description": "The object that this event is about.",
+ "$ref": "#/definitions/v1.ObjectReference"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "lastTimestamp": {
+ "description": "The time at which the most recent occurrence of this event was recorded.",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human-readable description of the status of this operation.",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "reason": {
+ "description": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
+ "type": "string"
+ },
+ "related": {
+ "description": "Optional secondary object for more complex actions.",
+ "$ref": "#/definitions/v1.ObjectReference"
+ },
+ "reportingComponent": {
+ "description": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
+ "type": "string"
+ },
+ "reportingInstance": {
+ "description": "ID of the controller instance, e.g. `kubelet-xyzf`.",
+ "type": "string"
+ },
+ "series": {
+ "description": "Data about the Event series this event represents or nil if it's a singleton Event.",
+ "$ref": "#/definitions/v1.EventSeries"
+ },
+ "source": {
+ "description": "The component reporting this event. Should be a short machine understandable string.",
+ "$ref": "#/definitions/v1.EventSource"
+ },
+ "type": {
+ "description": "Type of this event (Normal, Warning), new types could be added in the future",
+ "type": "string"
+ }
+ }
+ },
+ "v1.EventSeries": {
+ "description": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
+ "properties": {
+ "count": {
+ "description": "Number of occurrences in this series up to the last heartbeat time",
+ "type": "integer",
+ "format": "int32"
+ },
+ "lastObservedTime": {
+ "description": "Time of the last occurrence observed",
+ "type": "string"
+ },
+ "state": {
+ "description": "State of this Series: Ongoing or Finished. Deprecated. Planned removal for 1.18",
+ "type": "string"
+ }
+ }
+ },
+ "v1.EventSource": {
+ "description": "EventSource contains information for an event.",
+ "properties": {
+ "component": {
+ "description": "Component from which the event is generated.",
+ "type": "string"
+ },
+ "host": {
+ "description": "Node name on which the event is generated.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ExecAction": {
+ "description": "ExecAction describes a \"run in container\" action.",
+ "properties": {
+ "command": {
+ "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.FCVolumeSource": {
+ "description": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "lun": {
+ "description": "Optional: FC target lun number",
+ "type": "integer",
+ "format": "int32"
+ },
+ "readOnly": {
+ "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "targetWWNs": {
+ "description": "Optional: FC target worldwide names (WWNs)",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "wwids": {
+ "description": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.FinalizerName": {},
+ "v1.FlexVolumeSource": {
+ "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ "required": [
+ "driver"
+ ],
+ "properties": {
+ "driver": {
+ "description": "Driver is the name of the driver to use for this volume.",
+ "type": "string"
+ },
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+ "type": "string"
+ },
+ "options": {
+ "description": "Optional: Extra command options if any.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "readOnly": {
+ "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ }
+ }
+ },
+ "v1.FlockerVolumeSource": {
+ "description": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.",
+ "properties": {
+ "datasetName": {
+ "description": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated",
+ "type": "string"
+ },
+ "datasetUUID": {
+ "description": "UUID of the dataset. This is unique identifier of a Flocker dataset",
+ "type": "string"
+ }
+ }
+ },
+ "v1.GCEPersistentDiskVolumeSource": {
+ "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
+ "required": [
+ "pdName"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "type": "string"
+ },
+ "partition": {
+ "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "type": "integer",
+ "format": "int32"
+ },
+ "pdName": {
+ "description": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.GitRepoVolumeSource": {
+ "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+ "required": [
+ "repository"
+ ],
+ "properties": {
+ "directory": {
+ "description": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
+ "type": "string"
+ },
+ "repository": {
+ "description": "Repository URL",
+ "type": "string"
+ },
+ "revision": {
+ "description": "Commit hash for the specified revision.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.GlusterfsVolumeSource": {
+ "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+ "required": [
+ "endpoints",
+ "path"
+ ],
+ "properties": {
+ "endpoints": {
+ "description": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+ "type": "string"
+ },
+ "path": {
+ "description": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.HTTPGetAction": {
+ "description": "HTTPGetAction describes an action based on HTTP Get requests.",
+ "required": [
+ "port"
+ ],
+ "properties": {
+ "host": {
+ "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
+ "type": "string"
+ },
+ "httpHeaders": {
+ "description": "Custom headers to set in the request. HTTP allows repeated headers.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.HTTPHeader"
+ }
+ },
+ "path": {
+ "description": "Path to access on the HTTP server.",
+ "type": "string"
+ },
+ "port": {
+ "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+ "type": "string"
+ },
+ "scheme": {
+ "description": "Scheme to use for connecting to the host. Defaults to HTTP.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.HTTPHeader": {
+ "description": "HTTPHeader describes a custom header to be used in HTTP probes",
+ "required": [
+ "name",
+ "value"
+ ],
+ "properties": {
+ "name": {
+ "description": "The header field name",
+ "type": "string"
+ },
+ "value": {
+ "description": "The header field value",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Handler": {
+ "description": "Handler defines a specific action that should be taken",
+ "properties": {
+ "exec": {
+ "description": "One and only one of the following should be specified. Exec specifies the action to take.",
+ "$ref": "#/definitions/v1.ExecAction"
+ },
+ "httpGet": {
+ "description": "HTTPGet specifies the http request to perform.",
+ "$ref": "#/definitions/v1.HTTPGetAction"
+ },
+ "tcpSocket": {
+ "description": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
+ "$ref": "#/definitions/v1.TCPSocketAction"
+ }
+ }
+ },
+ "v1.HostAlias": {
+ "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.",
+ "properties": {
+ "hostnames": {
+ "description": "Hostnames for the above IP address.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ip": {
+ "description": "IP address of the host file entry.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.HostPathType": {},
+ "v1.HostPathVolumeSource": {
+ "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
+ "required": [
+ "path"
+ ],
+ "properties": {
+ "path": {
+ "description": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type for HostPath Volume. Defaults to \"\". More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ "$ref": "#/definitions/v1.HostPathType"
+ }
+ }
+ },
+ "v1.IPFamily": {},
+ "v1.ISCSIVolumeSource": {
+ "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+ "required": [
+ "targetPortal",
+ "iqn",
+ "lun"
+ ],
+ "properties": {
+ "chapAuthDiscovery": {
+ "description": "whether support iSCSI Discovery CHAP authentication",
+ "type": "boolean"
+ },
+ "chapAuthSession": {
+ "description": "whether support iSCSI Session CHAP authentication",
+ "type": "boolean"
+ },
+ "fsType": {
+ "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
+ "type": "string"
+ },
+ "initiatorName": {
+ "description": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.",
+ "type": "string"
+ },
+ "iqn": {
+ "description": "Target iSCSI Qualified Name.",
+ "type": "string"
+ },
+ "iscsiInterface": {
+ "description": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
+ "type": "string"
+ },
+ "lun": {
+ "description": "iSCSI Target Lun number.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "portals": {
+ "description": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "CHAP Secret for iSCSI target and initiator authentication",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "targetPortal": {
+ "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+ "type": "string"
+ }
+ }
+ },
+ "v1.KeyToPath": {
+ "description": "Maps a string key to a path within a volume.",
+ "required": [
+ "key",
+ "path"
+ ],
+ "properties": {
+ "key": {
+ "description": "The key to project.",
+ "type": "string"
+ },
+ "mode": {
+ "description": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "path": {
+ "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.LabelSelector": {
+ "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+ "properties": {
+ "matchExpressions": {
+ "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.LabelSelectorRequirement"
+ }
+ },
+ "matchLabels": {
+ "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.LabelSelectorRequirement": {
+ "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "required": [
+ "key",
+ "operator"
+ ],
+ "properties": {
+ "key": {
+ "description": "key is the label key that the selector applies to.",
+ "type": "string"
+ },
+ "operator": {
+ "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
+ "type": "string"
+ },
+ "values": {
+ "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.Lifecycle": {
+ "description": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
+ "properties": {
+ "postStart": {
+ "description": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "$ref": "#/definitions/v1.Handler"
+ },
+ "preStop": {
+ "description": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+ "$ref": "#/definitions/v1.Handler"
+ }
+ }
+ },
+ "v1.LoadBalancerIngress": {
+ "description": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
+ "properties": {
+ "hostname": {
+ "description": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
+ "type": "string"
+ },
+ "ip": {
+ "description": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
+ "type": "string"
+ }
+ }
+ },
+ "v1.LoadBalancerStatus": {
+ "description": "LoadBalancerStatus represents the status of a load-balancer.",
+ "properties": {
+ "ingress": {
+ "description": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.LoadBalancerIngress"
+ }
+ }
+ }
+ },
+ "v1.LocalObjectReference": {
+ "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
+ "properties": {
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ManagedFieldsEntry": {
+ "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+ "type": "string"
+ },
+ "fieldsType": {
+ "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
+ "type": "string"
+ },
+ "fieldsV1": {
+ "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
+ "type": "string"
+ },
+ "manager": {
+ "description": "Manager is an identifier of the workflow managing these fields.",
+ "type": "string"
+ },
+ "operation": {
+ "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+ "type": "string"
+ },
+ "time": {
+ "description": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
+ "type": "string"
+ }
+ }
+ },
+ "v1.MountPropagationMode": {},
+ "v1.NFSVolumeSource": {
+ "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
+ "required": [
+ "server",
+ "path"
+ ],
+ "properties": {
+ "path": {
+ "description": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ "type": "boolean"
+ },
+ "server": {
+ "description": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Namespace": {
+ "description": "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.NamespaceSpec"
+ },
+ "status": {
+ "description": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.NamespaceStatus"
+ }
+ }
+ },
+ "v1.NamespaceCondition": {
+ "description": "NamespaceCondition contains details about state of namespace.",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "type": "string"
+ },
+ "message": {
+ "type": "string"
+ },
+ "reason": {
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of namespace controller condition.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.NamespaceSpec": {
+ "description": "NamespaceSpec describes the attributes on a Namespace.",
+ "properties": {
+ "finalizers": {
+ "description": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.FinalizerName"
+ }
+ }
+ }
+ },
+ "v1.NamespaceStatus": {
+ "description": "NamespaceStatus is information about the current status of a Namespace.",
+ "properties": {
+ "conditions": {
+ "description": "Represents the latest available observations of a namespace's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.NamespaceCondition"
+ }
+ },
+ "phase": {
+ "description": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+ "type": "string"
+ }
+ }
+ },
+ "v1.NodeAffinity": {
+ "description": "Node affinity is a group of node affinity scheduling rules.",
+ "properties": {
+ "preferredDuringSchedulingIgnoredDuringExecution": {
+ "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PreferredSchedulingTerm"
+ }
+ },
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
+ "$ref": "#/definitions/v1.NodeSelector"
+ }
+ }
+ },
+ "v1.NodeSelector": {
+ "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
+ "required": [
+ "nodeSelectorTerms"
+ ],
+ "properties": {
+ "nodeSelectorTerms": {
+ "description": "Required. A list of node selector terms. The terms are ORed.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.NodeSelectorTerm"
+ }
+ }
+ }
+ },
+ "v1.NodeSelectorRequirement": {
+ "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+ "required": [
+ "key",
+ "operator"
+ ],
+ "properties": {
+ "key": {
+ "description": "The label key that the selector applies to.",
+ "type": "string"
+ },
+ "operator": {
+ "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.",
+ "type": "string"
+ },
+ "values": {
+ "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.NodeSelectorTerm": {
+ "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
+ "properties": {
+ "matchExpressions": {
+ "description": "A list of node selector requirements by node's labels.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.NodeSelectorRequirement"
+ }
+ },
+ "matchFields": {
+ "description": "A list of node selector requirements by node's fields.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.NodeSelectorRequirement"
+ }
+ }
+ }
+ },
+ "v1.ObjectFieldSelector": {
+ "description": "ObjectFieldSelector selects an APIVersioned field of an object.",
+ "required": [
+ "fieldPath"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
+ "type": "string"
+ },
+ "fieldPath": {
+ "description": "Path of the field to select in the specified API version.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ObjectMeta": {
+ "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ "properties": {
+ "annotations": {
+ "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "clusterName": {
+ "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
+ "type": "string"
+ },
+ "creationTimestamp": {
+ "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "type": "string"
+ },
+ "deletionGracePeriodSeconds": {
+ "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "deletionTimestamp": {
+ "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "type": "string"
+ },
+ "finalizers": {
+ "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "generateName": {
+ "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ "type": "string"
+ },
+ "generation": {
+ "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "labels": {
+ "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "managedFields": {
+ "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.ManagedFieldsEntry"
+ }
+ },
+ "name": {
+ "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ "type": "string"
+ },
+ "ownerReferences": {
+ "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.OwnerReference"
+ }
+ },
+ "resourceVersion": {
+ "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ObjectReference": {
+ "description": "ObjectReference contains enough information to let you inspect or modify the referred object.",
+ "properties": {
+ "apiVersion": {
+ "description": "API version of the referent.",
+ "type": "string"
+ },
+ "fieldPath": {
+ "description": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+ "type": "string"
+ },
+ "resourceVersion": {
+ "description": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
+ "type": "string"
+ }
+ }
+ },
+ "v1.OwnerReference": {
+ "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
+ "required": [
+ "apiVersion",
+ "kind",
+ "name",
+ "uid"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "API version of the referent.",
+ "type": "string"
+ },
+ "blockOwnerDeletion": {
+ "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
+ "type": "boolean"
+ },
+ "controller": {
+ "description": "If true, this reference points to the managing controller.",
+ "type": "boolean"
+ },
+ "kind": {
+ "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PersistentVolumeAccessMode": {},
+ "v1.PersistentVolumeClaim": {
+ "description": "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ "$ref": "#/definitions/v1.PersistentVolumeClaimSpec"
+ },
+ "status": {
+ "description": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ "$ref": "#/definitions/v1.PersistentVolumeClaimStatus"
+ }
+ }
+ },
+ "v1.PersistentVolumeClaimCondition": {
+ "description": "PersistentVolumeClaimCondition contains details about the state of a PVC",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastProbeTime": {
+ "description": "Last time we probed the condition.",
+ "type": "string"
+ },
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "type": "string"
+ },
+ "message": {
+ "description": "Human-readable message indicating details about last transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.",
+ "type": "string"
+ },
+ "status": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.PersistentVolumeClaimSpec": {
+ "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
+ "properties": {
+ "accessModes": {
+ "description": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PersistentVolumeAccessMode"
+ }
+ },
+ "dataSource": {
+ "description": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
+ "$ref": "#/definitions/v1.TypedLocalObjectReference"
+ },
+ "resources": {
+ "description": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+ "$ref": "#/definitions/v1.ResourceRequirements"
+ },
+ "selector": {
+ "description": "A label query over volumes to consider for binding.",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "storageClassName": {
+ "description": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
+ "type": "string"
+ },
+ "volumeMode": {
+ "description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
+ "$ref": "#/definitions/v1.PersistentVolumeMode"
+ },
+ "volumeName": {
+ "description": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PersistentVolumeClaimStatus": {
+ "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
+ "properties": {
+ "accessModes": {
+ "description": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PersistentVolumeAccessMode"
+ }
+ },
+ "capacity": {
+ "description": "Represents the actual resources of the underlying volume.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ },
+ "conditions": {
+ "description": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PersistentVolumeClaimCondition"
+ }
+ },
+ "phase": {
+ "description": "Phase represents the current phase of PersistentVolumeClaim.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PersistentVolumeClaimVolumeSource": {
+ "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
+ "required": [
+ "claimName"
+ ],
+ "properties": {
+ "claimName": {
+ "description": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Will force the ReadOnly setting in VolumeMounts. Default false.",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.PersistentVolumeMode": {},
+ "v1.PhotonPersistentDiskVolumeSource": {
+ "description": "Represents a Photon Controller persistent disk resource.",
+ "required": [
+ "pdID"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "pdID": {
+ "description": "ID that identifies Photon Controller persistent disk",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PodAffinity": {
+ "description": "Pod affinity is a group of inter pod affinity scheduling rules.",
+ "properties": {
+ "preferredDuringSchedulingIgnoredDuringExecution": {
+ "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.WeightedPodAffinityTerm"
+ }
+ },
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PodAffinityTerm"
+ }
+ }
+ }
+ },
+ "v1.PodAffinityTerm": {
+ "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running",
+ "required": [
+ "topologyKey"
+ ],
+ "properties": {
+ "labelSelector": {
+ "description": "A label query over a set of resources, in this case pods.",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "namespaces": {
+ "description": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "topologyKey": {
+ "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PodAntiAffinity": {
+ "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
+ "properties": {
+ "preferredDuringSchedulingIgnoredDuringExecution": {
+ "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.WeightedPodAffinityTerm"
+ }
+ },
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PodAffinityTerm"
+ }
+ }
+ }
+ },
+ "v1.PodDNSConfig": {
+ "description": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
+ "properties": {
+ "nameservers": {
+ "description": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "options": {
+ "description": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PodDNSConfigOption"
+ }
+ },
+ "searches": {
+ "description": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.PodDNSConfigOption": {
+ "description": "PodDNSConfigOption defines DNS resolver options of a pod.",
+ "properties": {
+ "name": {
+ "description": "Required.",
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ },
+ "v1.PodReadinessGate": {
+ "description": "PodReadinessGate contains the reference to a pod condition",
+ "required": [
+ "conditionType"
+ ],
+ "properties": {
+ "conditionType": {
+ "description": "ConditionType refers to a condition in the pod's condition list with matching type.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PodSecurityContext": {
+ "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
+ "properties": {
+ "fsGroup": {
+ "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- ",
+ "type": "integer",
+ "format": "int64"
+ },
+ "runAsGroup": {
+ "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "runAsNonRoot": {
+ "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "type": "boolean"
+ },
+ "runAsUser": {
+ "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "seLinuxOptions": {
+ "description": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+ "$ref": "#/definitions/v1.SELinuxOptions"
+ },
+ "supplementalGroups": {
+ "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ "sysctls": {
+ "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Sysctl"
+ }
+ },
+ "windowsOptions": {
+ "description": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "$ref": "#/definitions/v1.WindowsSecurityContextOptions"
+ }
+ }
+ },
+ "v1.PodSpec": {
+ "description": "PodSpec is a description of a pod.",
+ "required": [
+ "containers"
+ ],
+ "properties": {
+ "activeDeadlineSeconds": {
+ "description": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "affinity": {
+ "description": "If specified, the pod's scheduling constraints",
+ "$ref": "#/definitions/v1.Affinity"
+ },
+ "automountServiceAccountToken": {
+ "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
+ "type": "boolean"
+ },
+ "containers": {
+ "description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Container"
+ }
+ },
+ "dnsConfig": {
+ "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+ "$ref": "#/definitions/v1.PodDNSConfig"
+ },
+ "dnsPolicy": {
+ "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
+ "type": "string"
+ },
+ "enableServiceLinks": {
+ "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ "type": "boolean"
+ },
+ "ephemeralContainers": {
+ "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.EphemeralContainer"
+ }
+ },
+ "hostAliases": {
+ "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.HostAlias"
+ }
+ },
+ "hostIPC": {
+ "description": "Use the host's ipc namespace. Optional: Default to false.",
+ "type": "boolean"
+ },
+ "hostNetwork": {
+ "description": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+ "type": "boolean"
+ },
+ "hostPID": {
+ "description": "Use the host's pid namespace. Optional: Default to false.",
+ "type": "boolean"
+ },
+ "hostname": {
+ "description": "Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.",
+ "type": "string"
+ },
+ "imagePullSecrets": {
+ "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ }
+ },
+ "initContainers": {
+ "description": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Container"
+ }
+ },
+ "nodeName": {
+ "description": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
+ "type": "string"
+ },
+ "nodeSelector": {
+ "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "overhead": {
+ "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ },
+ "preemptionPolicy": {
+ "description": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
+ "$ref": "#/definitions/v1.PreemptionPolicy"
+ },
+ "priority": {
+ "description": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "priorityClassName": {
+ "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ "type": "string"
+ },
+ "readinessGates": {
+ "description": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PodReadinessGate"
+ }
+ },
+ "restartPolicy": {
+ "description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
+ "type": "string"
+ },
+ "runtimeClassName": {
+ "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
+ "type": "string"
+ },
+ "schedulerName": {
+ "description": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+ "type": "string"
+ },
+ "securityContext": {
+ "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ "$ref": "#/definitions/v1.PodSecurityContext"
+ },
+ "serviceAccount": {
+ "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
+ "type": "string"
+ },
+ "serviceAccountName": {
+ "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+ "type": "string"
+ },
+ "shareProcessNamespace": {
+ "description": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
+ "type": "boolean"
+ },
+ "subdomain": {
+ "description": "If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.",
+ "type": "string"
+ },
+ "terminationGracePeriodSeconds": {
+ "description": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "tolerations": {
+ "description": "If specified, the pod's tolerations.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Toleration"
+ }
+ },
+ "topologySpreadConstraints": {
+ "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.TopologySpreadConstraint"
+ }
+ },
+ "volumes": {
+ "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.Volume"
+ }
+ }
+ }
+ },
+ "v1.PodTemplateSpec": {
+ "description": "PodTemplateSpec describes the data a pod should have when created from a template",
+ "properties": {
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.PodSpec"
+ }
+ }
+ },
+ "v1.PolicyRule": {
+ "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
+ "required": [
+ "verbs"
+ ],
+ "properties": {
+ "apiGroups": {
+ "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "nonResourceURLs": {
+ "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "resourceNames": {
+ "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "resources": {
+ "description": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "verbs": {
+ "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "v1.PortworxVolumeSource": {
+ "description": "PortworxVolumeSource represents a Portworx volume resource.",
+ "required": [
+ "volumeID"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "volumeID": {
+ "description": "VolumeID uniquely identifies a Portworx volume",
+ "type": "string"
+ }
+ }
+ },
+ "v1.PreemptionPolicy": {},
+ "v1.PreferredSchedulingTerm": {
+ "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
+ "required": [
+ "weight",
+ "preference"
+ ],
+ "properties": {
+ "preference": {
+ "description": "A node selector term, associated with the corresponding weight.",
+ "$ref": "#/definitions/v1.NodeSelectorTerm"
+ },
+ "weight": {
+ "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.Probe": {
+ "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+ "properties": {
+ "exec": {
+ "description": "One and only one of the following should be specified. Exec specifies the action to take.",
+ "$ref": "#/definitions/v1.ExecAction"
+ },
+ "failureThreshold": {
+ "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "httpGet": {
+ "description": "HTTPGet specifies the http request to perform.",
+ "$ref": "#/definitions/v1.HTTPGetAction"
+ },
+ "initialDelaySeconds": {
+ "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "type": "integer",
+ "format": "int32"
+ },
+ "periodSeconds": {
+ "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "successThreshold": {
+ "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "tcpSocket": {
+ "description": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
+ "$ref": "#/definitions/v1.TCPSocketAction"
+ },
+ "timeoutSeconds": {
+ "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.ProcMountType": {},
+ "v1.ProjectedVolumeSource": {
+ "description": "Represents a projected volume source",
+ "required": [
+ "sources"
+ ],
+ "properties": {
+ "defaultMode": {
+ "description": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "sources": {
+ "description": "list of volume projections",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.VolumeProjection"
+ }
+ }
+ }
+ },
+ "v1.QuobyteVolumeSource": {
+ "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.",
+ "required": [
+ "registry",
+ "volume"
+ ],
+ "properties": {
+ "group": {
+ "description": "Group to map volume access to Default is no group",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.",
+ "type": "boolean"
+ },
+ "registry": {
+ "description": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes",
+ "type": "string"
+ },
+ "tenant": {
+ "description": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin",
+ "type": "string"
+ },
+ "user": {
+ "description": "User to map volume access to Defaults to serviceaccount user",
+ "type": "string"
+ },
+ "volume": {
+ "description": "Volume is a string that references an already created Quobyte volume by name.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.RBDVolumeSource": {
+ "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+ "required": [
+ "monitors",
+ "image"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+ "type": "string"
+ },
+ "image": {
+ "description": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "string"
+ },
+ "keyring": {
+ "description": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "string"
+ },
+ "monitors": {
+ "description": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "pool": {
+ "description": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "user": {
+ "description": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ReplicaSet": {
+ "description": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.ReplicaSetSpec"
+ },
+ "status": {
+ "description": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.ReplicaSetStatus"
+ }
+ }
+ },
+ "v1.ReplicaSetCondition": {
+ "description": "ReplicaSetCondition describes the state of a replica set at a certain point.",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "description": "The last time the condition transitioned from one status to another.",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human readable message indicating details about the transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "The reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of replica set condition.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ReplicaSetSpec": {
+ "description": "ReplicaSetSpec is the specification of a ReplicaSet.",
+ "required": [
+ "selector"
+ ],
+ "properties": {
+ "minReadySeconds": {
+ "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+ "type": "integer",
+ "format": "int32"
+ },
+ "replicas": {
+ "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "type": "integer",
+ "format": "int32"
+ },
+ "selector": {
+ "description": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "template": {
+ "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+ "$ref": "#/definitions/v1.PodTemplateSpec"
+ }
+ }
+ },
+ "v1.ReplicaSetStatus": {
+ "description": "ReplicaSetStatus represents the current status of a ReplicaSet.",
+ "required": [
+ "replicas"
+ ],
+ "properties": {
+ "availableReplicas": {
+ "description": "The number of available replicas (ready for at least minReadySeconds) for this replica set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "conditions": {
+ "description": "Represents the latest available observations of a replica set's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.ReplicaSetCondition"
+ }
+ },
+ "fullyLabeledReplicas": {
+ "description": "The number of pods that have labels matching the labels of the pod template of the replicaset.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "observedGeneration": {
+ "description": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "readyReplicas": {
+ "description": "The number of ready replicas for this replica set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "replicas": {
+ "description": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.ResourceFieldSelector": {
+ "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
+ "required": [
+ "resource"
+ ],
+ "properties": {
+ "containerName": {
+ "description": "Container name: required for volumes, optional for env vars",
+ "type": "string"
+ },
+ "divisor": {
+ "description": "Specifies the output format of the exposed resources, defaults to \"1\"",
+ "type": "string"
+ },
+ "resource": {
+ "description": "Required: resource to select",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ResourceQuotaStatus": {
+ "description": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
+ "properties": {
+ "hard": {
+ "description": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ },
+ "used": {
+ "description": "Used is the current observed total usage of the resource in the namespace.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ }
+ }
+ },
+ "v1.ResourceRequirements": {
+ "description": "ResourceRequirements describes the compute resource requirements.",
+ "properties": {
+ "limits": {
+ "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ },
+ "requests": {
+ "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/resource.Quantity"
+ }
+ }
+ }
+ },
+ "v1.Role": {
+ "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.",
+ "required": [
+ "rules"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata.",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "rules": {
+ "description": "Rules holds all the PolicyRules for this Role",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PolicyRule"
+ }
+ }
+ }
+ },
+ "v1.RollingUpdateDaemonSet": {
+ "description": "Spec to control the desired behavior of daemon set rolling update.",
+ "properties": {
+ "maxUnavailable": {
+ "description": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.RollingUpdateDeployment": {
+ "description": "Spec to control the desired behavior of rolling update.",
+ "properties": {
+ "maxSurge": {
+ "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
+ "type": "string"
+ },
+ "maxUnavailable": {
+ "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.RollingUpdateStatefulSetStrategy": {
+ "description": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
+ "properties": {
+ "partition": {
+ "description": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.SELinuxOptions": {
+ "description": "SELinuxOptions are the labels to be applied to the container",
+ "properties": {
+ "level": {
+ "description": "Level is SELinux level label that applies to the container.",
+ "type": "string"
+ },
+ "role": {
+ "description": "Role is a SELinux role label that applies to the container.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type is a SELinux type label that applies to the container.",
+ "type": "string"
+ },
+ "user": {
+ "description": "User is a SELinux user label that applies to the container.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ScaleIOVolumeSource": {
+ "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume",
+ "required": [
+ "gateway",
+ "system",
+ "secretRef"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
+ "type": "string"
+ },
+ "gateway": {
+ "description": "The host address of the ScaleIO API Gateway.",
+ "type": "string"
+ },
+ "protectionDomain": {
+ "description": "The name of the ScaleIO Protection Domain for the configured storage.",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "sslEnabled": {
+ "description": "Flag to enable/disable SSL communication with Gateway, default false",
+ "type": "boolean"
+ },
+ "storageMode": {
+ "description": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
+ "type": "string"
+ },
+ "storagePool": {
+ "description": "The ScaleIO Storage Pool associated with the protection domain.",
+ "type": "string"
+ },
+ "system": {
+ "description": "The name of the storage system as configured in ScaleIO.",
+ "type": "string"
+ },
+ "volumeName": {
+ "description": "The name of a volume already created in the ScaleIO system that is associated with this volume source.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Secret": {
+ "description": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "data": {
+ "description": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
+ "type": "object"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "stringData": {
+ "description": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "type": {
+ "description": "Used to facilitate programmatic handling of secret data.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.SecretEnvSource": {
+ "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.",
+ "properties": {
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the Secret must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.SecretKeySelector": {
+ "description": "SecretKeySelector selects a key of a Secret.",
+ "required": [
+ "key"
+ ],
+ "properties": {
+ "key": {
+ "description": "The key of the secret to select from. Must be a valid secret key.",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the Secret or its key must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.SecretProjection": {
+ "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.",
+ "properties": {
+ "items": {
+ "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.KeyToPath"
+ }
+ },
+ "name": {
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "optional": {
+ "description": "Specify whether the Secret or its key must be defined",
+ "type": "boolean"
+ }
+ }
+ },
+ "v1.SecretReference": {
+ "description": "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace",
+ "properties": {
+ "name": {
+ "description": "Name is unique within a namespace to reference a secret resource.",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace defines the space within which the secret name must be unique.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.SecretVolumeSource": {
+ "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
+ "properties": {
+ "defaultMode": {
+ "description": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "items": {
+ "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.KeyToPath"
+ }
+ },
+ "optional": {
+ "description": "Specify whether the Secret or its keys must be defined",
+ "type": "boolean"
+ },
+ "secretName": {
+ "description": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ "type": "string"
+ }
+ }
+ },
+ "v1.SecurityContext": {
+ "description": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.",
+ "properties": {
+ "allowPrivilegeEscalation": {
+ "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
+ "type": "boolean"
+ },
+ "capabilities": {
+ "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.",
+ "$ref": "#/definitions/v1.Capabilities"
+ },
+ "privileged": {
+ "description": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.",
+ "type": "boolean"
+ },
+ "procMount": {
+ "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.",
+ "$ref": "#/definitions/v1.ProcMountType"
+ },
+ "readOnlyRootFilesystem": {
+ "description": "Whether this container has a read-only root filesystem. Default is false.",
+ "type": "boolean"
+ },
+ "runAsGroup": {
+ "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "runAsNonRoot": {
+ "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "type": "boolean"
+ },
+ "runAsUser": {
+ "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "seLinuxOptions": {
+ "description": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "$ref": "#/definitions/v1.SELinuxOptions"
+ },
+ "windowsOptions": {
+ "description": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "$ref": "#/definitions/v1.WindowsSecurityContextOptions"
+ }
+ }
+ },
+ "v1.Service": {
+ "description": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.ServiceSpec"
+ },
+ "status": {
+ "description": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1.ServiceStatus"
+ }
+ }
+ },
+ "v1.ServiceAccountTokenProjection": {
+ "description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
+ "required": [
+ "path"
+ ],
+ "properties": {
+ "audience": {
+ "description": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.",
+ "type": "string"
+ },
+ "expirationSeconds": {
+        "description": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "path": {
+ "description": "Path is the path relative to the mount point of the file to project the token into.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ServicePort": {
+ "description": "ServicePort contains information on service's port.",
+ "required": [
+ "port"
+ ],
+ "properties": {
+ "name": {
+ "description": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
+ "type": "string"
+ },
+ "nodePort": {
+ "description": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+ "type": "integer",
+ "format": "int32"
+ },
+ "port": {
+ "description": "The port that will be exposed by this service.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "protocol": {
+ "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
+ "type": "string"
+ },
+ "targetPort": {
+ "description": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ServiceSpec": {
+ "description": "ServiceSpec describes the attributes that a user creates on a service.",
+ "properties": {
+ "clusterIP": {
+ "description": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ "type": "string"
+ },
+ "externalIPs": {
+ "description": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "externalName": {
+ "description": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.",
+ "type": "string"
+ },
+ "externalTrafficPolicy": {
+ "description": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
+ "type": "string"
+ },
+ "healthCheckNodePort": {
+ "description": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "ipFamily": {
+ "description": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.",
+ "$ref": "#/definitions/v1.IPFamily"
+ },
+ "loadBalancerIP": {
+ "description": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
+ "type": "string"
+ },
+ "loadBalancerSourceRanges": {
+        "description": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "ports": {
+ "description": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.ServicePort"
+ }
+ },
+ "publishNotReadyAddresses": {
+ "description": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.",
+ "type": "boolean"
+ },
+ "selector": {
+ "description": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "sessionAffinity": {
+ "description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ "type": "string"
+ },
+ "sessionAffinityConfig": {
+ "description": "sessionAffinityConfig contains the configurations of session affinity.",
+ "$ref": "#/definitions/v1.SessionAffinityConfig"
+ },
+ "topologyKeys": {
+ "description": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "type": {
+ "description": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
+ "type": "string"
+ }
+ }
+ },
+ "v1.ServiceStatus": {
+ "description": "ServiceStatus represents the current status of a service.",
+ "properties": {
+ "loadBalancer": {
+ "description": "LoadBalancer contains the current status of the load-balancer, if one is present.",
+ "$ref": "#/definitions/v1.LoadBalancerStatus"
+ }
+ }
+ },
+ "v1.SessionAffinityConfig": {
+ "description": "SessionAffinityConfig represents the configurations of session affinity.",
+ "properties": {
+ "clientIP": {
+ "description": "clientIP contains the configurations of Client IP based session affinity.",
+ "$ref": "#/definitions/v1.ClientIPConfig"
+ }
+ }
+ },
+ "v1.StatefulSet": {
+ "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec defines the desired identities of pods in this set.",
+ "$ref": "#/definitions/v1.StatefulSetSpec"
+ },
+ "status": {
+ "description": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+ "$ref": "#/definitions/v1.StatefulSetStatus"
+ }
+ }
+ },
+ "v1.StatefulSetCondition": {
+ "description": "StatefulSetCondition describes the state of a statefulset at a certain point.",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human readable message indicating details about the transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "The reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type of statefulset condition.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.StatefulSetSpec": {
+ "description": "A StatefulSetSpec is the specification of a StatefulSet.",
+ "required": [
+ "selector",
+ "template",
+ "serviceName"
+ ],
+ "properties": {
+ "podManagementPolicy": {
+ "description": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
+ "type": "string"
+ },
+ "replicas": {
+ "description": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "revisionHistoryLimit": {
+ "description": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "selector": {
+ "description": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "serviceName": {
+ "description": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.",
+ "type": "string"
+ },
+ "template": {
+ "description": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.",
+ "$ref": "#/definitions/v1.PodTemplateSpec"
+ },
+ "updateStrategy": {
+ "description": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
+ "$ref": "#/definitions/v1.StatefulSetUpdateStrategy"
+ },
+ "volumeClaimTemplates": {
+ "description": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PersistentVolumeClaim"
+ }
+ }
+ }
+ },
+ "v1.StatefulSetStatus": {
+ "description": "StatefulSetStatus represents the current state of a StatefulSet.",
+ "required": [
+ "replicas"
+ ],
+ "properties": {
+ "collisionCount": {
+ "description": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "conditions": {
+ "description": "Represents the latest available observations of a statefulset's current state.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.StatefulSetCondition"
+ }
+ },
+ "currentReplicas": {
+ "description": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "currentRevision": {
+ "description": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).",
+ "type": "string"
+ },
+ "observedGeneration": {
+ "description": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "readyReplicas": {
+ "description": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "replicas": {
+ "description": "replicas is the number of Pods created by the StatefulSet controller.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "updateRevision": {
+ "description": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
+ "type": "string"
+ },
+ "updatedReplicas": {
+ "description": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.StatefulSetUpdateStrategy": {
+ "description": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
+ "properties": {
+ "rollingUpdate": {
+ "description": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.",
+ "$ref": "#/definitions/v1.RollingUpdateStatefulSetStrategy"
+ },
+ "type": {
+ "description": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.StorageOSVolumeSource": {
+ "description": "Represents a StorageOS persistent volume resource.",
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+ "type": "boolean"
+ },
+ "secretRef": {
+ "description": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.",
+ "$ref": "#/definitions/v1.LocalObjectReference"
+ },
+ "volumeName": {
+ "description": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.",
+ "type": "string"
+ },
+ "volumeNamespace": {
+ "description": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Sysctl": {
+ "description": "Sysctl defines a kernel parameter to be set",
+ "required": [
+ "name",
+ "value"
+ ],
+ "properties": {
+ "name": {
+ "description": "Name of a property to set",
+ "type": "string"
+ },
+ "value": {
+ "description": "Value of a property to set",
+ "type": "string"
+ }
+ }
+ },
+ "v1.TCPSocketAction": {
+ "description": "TCPSocketAction describes an action based on opening a socket",
+ "required": [
+ "port"
+ ],
+ "properties": {
+ "host": {
+ "description": "Optional: Host name to connect to, defaults to the pod IP.",
+ "type": "string"
+ },
+ "port": {
+ "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Toleration": {
+ "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.",
+ "properties": {
+ "effect": {
+ "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
+ "type": "string"
+ },
+ "key": {
+ "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
+ "type": "string"
+ },
+ "operator": {
+ "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
+ "type": "string"
+ },
+ "tolerationSeconds": {
+ "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "value": {
+ "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.TopologySpreadConstraint": {
+ "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
+ "required": [
+ "maxSkew",
+ "topologyKey",
+ "whenUnsatisfiable"
+ ],
+ "properties": {
+ "labelSelector": {
+ "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "maxSkew": {
+ "description": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "topologyKey": {
+ "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.",
+ "type": "string"
+ },
+ "whenUnsatisfiable": {
+ "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.TypedLocalObjectReference": {
+ "description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
+ "required": [
+ "apiGroup",
+ "kind",
+ "name"
+ ],
+ "properties": {
+ "apiGroup": {
+ "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is the type of resource being referenced",
+ "type": "string"
+ },
+ "name": {
+ "description": "Name is the name of resource being referenced",
+ "type": "string"
+ }
+ }
+ },
+ "v1.Volume": {
+ "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "awsElasticBlockStore": {
+ "description": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ "$ref": "#/definitions/v1.AWSElasticBlockStoreVolumeSource"
+ },
+ "azureDisk": {
+ "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ "$ref": "#/definitions/v1.AzureDiskVolumeSource"
+ },
+ "azureFile": {
+ "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ "$ref": "#/definitions/v1.AzureFileVolumeSource"
+ },
+ "cephfs": {
+ "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ "$ref": "#/definitions/v1.CephFSVolumeSource"
+ },
+ "cinder": {
+ "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ "$ref": "#/definitions/v1.CinderVolumeSource"
+ },
+ "configMap": {
+ "description": "ConfigMap represents a configMap that should populate this volume",
+ "$ref": "#/definitions/v1.ConfigMapVolumeSource"
+ },
+ "csi": {
+ "description": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).",
+ "$ref": "#/definitions/v1.CSIVolumeSource"
+ },
+ "downwardAPI": {
+ "description": "DownwardAPI represents downward API about the pod that should populate this volume",
+ "$ref": "#/definitions/v1.DownwardAPIVolumeSource"
+ },
+ "emptyDir": {
+ "description": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ "$ref": "#/definitions/v1.EmptyDirVolumeSource"
+ },
+ "fc": {
+ "description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ "$ref": "#/definitions/v1.FCVolumeSource"
+ },
+ "flexVolume": {
+ "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ "$ref": "#/definitions/v1.FlexVolumeSource"
+ },
+ "flocker": {
+ "description": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ "$ref": "#/definitions/v1.FlockerVolumeSource"
+ },
+ "gcePersistentDisk": {
+ "description": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ "$ref": "#/definitions/v1.GCEPersistentDiskVolumeSource"
+ },
+ "gitRepo": {
+ "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+ "$ref": "#/definitions/v1.GitRepoVolumeSource"
+ },
+ "glusterfs": {
+ "description": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ "$ref": "#/definitions/v1.GlusterfsVolumeSource"
+ },
+ "hostPath": {
+ "description": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ "$ref": "#/definitions/v1.HostPathVolumeSource"
+ },
+ "iscsi": {
+ "description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+ "$ref": "#/definitions/v1.ISCSIVolumeSource"
+ },
+ "name": {
+ "description": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ "type": "string"
+ },
+ "nfs": {
+ "description": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ "$ref": "#/definitions/v1.NFSVolumeSource"
+ },
+ "persistentVolumeClaim": {
+ "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource"
+ },
+ "photonPersistentDisk": {
+ "description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ "$ref": "#/definitions/v1.PhotonPersistentDiskVolumeSource"
+ },
+ "portworxVolume": {
+ "description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ "$ref": "#/definitions/v1.PortworxVolumeSource"
+ },
+ "projected": {
+ "description": "Items for all in one resources secrets, configmaps, and downward API",
+ "$ref": "#/definitions/v1.ProjectedVolumeSource"
+ },
+ "quobyte": {
+ "description": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ "$ref": "#/definitions/v1.QuobyteVolumeSource"
+ },
+ "rbd": {
+ "description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ "$ref": "#/definitions/v1.RBDVolumeSource"
+ },
+ "scaleIO": {
+ "description": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ "$ref": "#/definitions/v1.ScaleIOVolumeSource"
+ },
+ "secret": {
+ "description": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ "$ref": "#/definitions/v1.SecretVolumeSource"
+ },
+ "storageos": {
+ "description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ "$ref": "#/definitions/v1.StorageOSVolumeSource"
+ },
+ "vsphereVolume": {
+ "description": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ "$ref": "#/definitions/v1.VsphereVirtualDiskVolumeSource"
+ }
+ }
+ },
+ "v1.VolumeDevice": {
+ "description": "volumeDevice describes a mapping of a raw block device within a container.",
+ "required": [
+ "name",
+ "devicePath"
+ ],
+ "properties": {
+ "devicePath": {
+ "description": "devicePath is the path inside of the container that the device will be mapped to.",
+ "type": "string"
+ },
+ "name": {
+ "description": "name must match the name of a persistentVolumeClaim in the pod",
+ "type": "string"
+ }
+ }
+ },
+ "v1.VolumeMount": {
+ "description": "VolumeMount describes a mounting of a Volume within a container.",
+ "required": [
+ "name",
+ "mountPath"
+ ],
+ "properties": {
+ "mountPath": {
+ "description": "Path within the container at which the volume should be mounted. Must not contain ':'.",
+ "type": "string"
+ },
+ "mountPropagation": {
+ "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.",
+ "$ref": "#/definitions/v1.MountPropagationMode"
+ },
+ "name": {
+ "description": "This must match the Name of a Volume.",
+ "type": "string"
+ },
+ "readOnly": {
+ "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
+ "type": "boolean"
+ },
+ "subPath": {
+ "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
+ "type": "string"
+ },
+ "subPathExpr": {
+ "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.",
+ "type": "string"
+ }
+ }
+ },
+ "v1.VolumeProjection": {
+ "description": "Projection that may be projected along with other supported volume types",
+ "properties": {
+ "configMap": {
+ "description": "information about the configMap data to project",
+ "$ref": "#/definitions/v1.ConfigMapProjection"
+ },
+ "downwardAPI": {
+ "description": "information about the downwardAPI data to project",
+ "$ref": "#/definitions/v1.DownwardAPIProjection"
+ },
+ "secret": {
+ "description": "information about the secret data to project",
+ "$ref": "#/definitions/v1.SecretProjection"
+ },
+ "serviceAccountToken": {
+ "description": "information about the serviceAccountToken data to project",
+ "$ref": "#/definitions/v1.ServiceAccountTokenProjection"
+ }
+ }
+ },
+ "v1.VsphereVirtualDiskVolumeSource": {
+ "description": "Represents a vSphere volume resource.",
+ "required": [
+ "volumePath"
+ ],
+ "properties": {
+ "fsType": {
+ "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+ "type": "string"
+ },
+ "storagePolicyID": {
+ "description": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.",
+ "type": "string"
+ },
+ "storagePolicyName": {
+ "description": "Storage Policy Based Management (SPBM) profile name.",
+ "type": "string"
+ },
+ "volumePath": {
+ "description": "Path that identifies vSphere volume vmdk",
+ "type": "string"
+ }
+ }
+ },
+ "v1.WeightedPodAffinityTerm": {
+ "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
+ "required": [
+ "weight",
+ "podAffinityTerm"
+ ],
+ "properties": {
+ "podAffinityTerm": {
+ "description": "Required. A pod affinity term, associated with the corresponding weight.",
+ "$ref": "#/definitions/v1.PodAffinityTerm"
+ },
+ "weight": {
+ "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1.WindowsSecurityContextOptions": {
+ "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
+ "properties": {
+ "gmsaCredentialSpec": {
+ "description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
+ "type": "string"
+ },
+ "gmsaCredentialSpecName": {
+ "description": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
+ "type": "string"
+ },
+ "runAsUserName": {
+ "description": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha1.APIResponse": {
+ "properties": {
+ "histogram": {
+ "description": "histogram results",
+ "$ref": "#/definitions/events.Histogram"
+ },
+ "query": {
+ "description": "query results",
+ "$ref": "#/definitions/events.Events"
+ },
+ "statistics": {
+ "description": "statistics results",
+ "$ref": "#/definitions/events.Statistics"
+ }
+ }
+ },
+ "v1alpha1.WorkspaceSpec": {
+ "properties": {
+ "manager": {
+ "type": "string"
+ },
+ "networkIsolation": {
+ "type": "boolean"
+ }
+ }
+ },
+ "v1alpha2.APIResponse": {
+ "properties": {
+ "histogram": {
+ "description": "histogram results",
+ "$ref": "#/definitions/logging.Histogram"
+ },
+ "query": {
+ "description": "query results",
+ "$ref": "#/definitions/logging.Logs"
+ },
+ "statistics": {
+ "description": "statistics results",
+ "$ref": "#/definitions/logging.Statistics"
+ }
+ }
+ },
+ "v1alpha2.BadRequestError": {
+ "required": [
+ "status",
+ "reason"
+ ],
+ "properties": {
+ "status": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha2.Column": {
+ "required": [
+ "id",
+ "label",
+ "dataType"
+ ],
+ "properties": {
+ "dataType": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.ComponentStatus": {
+ "required": [
+ "name",
+ "namespace",
+ "selfLink",
+ "label",
+ "startedAt",
+ "totalBackends",
+ "healthyBackends"
+ ],
+ "properties": {
+ "healthyBackends": {
+ "description": "the number of healthy backend components",
+ "type": "integer",
+ "format": "int32"
+ },
+ "label": {
+ "description": "labels",
+ "$ref": "#/definitions/v1alpha2.ComponentStatus.label"
+ },
+ "name": {
+ "description": "component name",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "the name of the namespace",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "self link",
+ "type": "string"
+ },
+ "startedAt": {
+ "description": "started time",
+ "type": "string",
+ "format": "date-time"
+ },
+ "totalBackends": {
+ "description": "the total replicas of each backend system component",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha2.ComponentStatus.label": {},
+ "v1alpha2.Connection": {
+ "required": [
+ "id",
+ "nodeId",
+ "label"
+ ],
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "labelMinor": {
+ "type": "string"
+ },
+ "metadata": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.MetadataRow"
+ }
+ },
+ "nodeId": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.ConnectionsSummary": {
+ "required": [
+ "id",
+ "topologyId",
+ "label",
+ "columns",
+ "connections"
+ ],
+ "properties": {
+ "columns": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Column"
+ }
+ },
+ "connections": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Connection"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "topologyId": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.Control": {
+ "required": [
+ "id",
+ "human",
+ "icon",
+ "rank"
+ ],
+ "properties": {
+ "confirmation": {
+ "type": "string"
+ },
+ "human": {
+ "type": "string"
+ },
+ "icon": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "rank": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha2.ControlInstance": {
+ "required": [
+ "ProbeID",
+ "NodeID",
+ "Control"
+ ],
+ "properties": {
+ "Control": {
+ "$ref": "#/definitions/v1alpha2.Control"
+ },
+ "NodeID": {
+ "type": "string"
+ },
+ "ProbeID": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.GlobalRole": {
+ "required": [
+ "rules"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "rules": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PolicyRule"
+ }
+ }
+ }
+ },
+ "v1alpha2.HealthStatus": {
+ "required": [
+ "kubesphereStatus",
+ "nodeStatus"
+ ],
+ "properties": {
+ "kubesphereStatus": {
+ "description": "kubesphere components status",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ComponentStatus"
+ }
+ },
+ "nodeStatus": {
+ "description": "nodes status",
+ "$ref": "#/definitions/v1alpha2.NodeStatus"
+ }
+ }
+ },
+ "v1alpha2.Member": {
+ "required": [
+ "username",
+ "roleRef"
+ ],
+ "properties": {
+ "roleRef": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.MetadataRow": {
+ "required": [
+ "id",
+ "label",
+ "value"
+ ],
+ "properties": {
+ "dataType": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "priority": {
+ "type": "number",
+ "format": "double"
+ },
+ "truncate": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.Metric": {
+ "required": [
+ "min",
+ "max"
+ ],
+ "properties": {
+ "max": {
+ "type": "number",
+ "format": "double"
+ },
+ "min": {
+ "type": "number",
+ "format": "double"
+ },
+ "samples": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Sample"
+ }
+ }
+ }
+ },
+ "v1alpha2.MetricRow": {
+ "required": [
+ "ID",
+ "Label",
+ "Format",
+ "Group",
+ "Value",
+ "ValueEmpty",
+ "Priority",
+ "URL",
+ "Metric"
+ ],
+ "properties": {
+ "Format": {
+ "type": "string"
+ },
+ "Group": {
+ "type": "string"
+ },
+ "ID": {
+ "type": "string"
+ },
+ "Label": {
+ "type": "string"
+ },
+ "Metric": {
+ "$ref": "#/definitions/v1alpha2.Metric"
+ },
+ "Priority": {
+ "type": "number",
+ "format": "double"
+ },
+ "URL": {
+ "type": "string"
+ },
+ "Value": {
+ "type": "number",
+ "format": "double"
+ },
+ "ValueEmpty": {
+ "type": "boolean"
+ }
+ }
+ },
+ "v1alpha2.Node": {
+ "required": [
+ "labelMinor",
+ "rank",
+ "id",
+ "label",
+ "controls"
+ ],
+ "properties": {
+ "adjacency": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "children": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.NodeSummaryGroup"
+ }
+ },
+ "connections": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ConnectionsSummary"
+ }
+ },
+ "controls": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.ControlInstance"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "labelMinor": {
+ "type": "string"
+ },
+ "metadata": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.MetadataRow"
+ }
+ },
+ "metrics": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.MetricRow"
+ }
+ },
+ "parents": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Parent"
+ }
+ },
+ "pseudo": {
+ "type": "boolean"
+ },
+ "rank": {
+ "type": "string"
+ },
+ "shape": {
+ "type": "string"
+ },
+ "stack": {
+ "type": "boolean"
+ },
+ "tables": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Table"
+ }
+ },
+ "tag": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.NodeResponse": {
+ "required": [
+ "node"
+ ],
+ "properties": {
+ "node": {
+ "$ref": "#/definitions/v1alpha2.Node"
+ }
+ }
+ },
+ "v1alpha2.NodeStatus": {
+ "required": [
+ "totalNodes",
+ "healthyNodes"
+ ],
+ "properties": {
+ "healthyNodes": {
+ "description": "the number of healthy nodes",
+ "type": "integer",
+ "format": "int32"
+ },
+ "totalNodes": {
+ "description": "total number of nodes",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha2.NodeSummary": {
+ "required": [
+ "id",
+ "label",
+ "labelMinor",
+ "rank"
+ ],
+ "properties": {
+ "adjacency": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "labelMinor": {
+ "type": "string"
+ },
+ "metadata": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.MetadataRow"
+ }
+ },
+ "metrics": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.MetricRow"
+ }
+ },
+ "parents": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Parent"
+ }
+ },
+ "pseudo": {
+ "type": "boolean"
+ },
+ "rank": {
+ "type": "string"
+ },
+ "shape": {
+ "type": "string"
+ },
+ "stack": {
+ "type": "boolean"
+ },
+ "tables": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Table"
+ }
+ },
+ "tag": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.NodeSummaryGroup": {
+ "required": [
+ "id",
+ "label",
+ "nodes",
+ "topologyId",
+ "columns"
+ ],
+ "properties": {
+ "columns": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Column"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.NodeSummary"
+ }
+ },
+ "topologyId": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.NotFoundError": {
+ "required": [
+ "status",
+ "reason"
+ ],
+ "properties": {
+ "status": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha2.Parent": {
+ "required": [
+ "id",
+ "label",
+ "topologyId"
+ ],
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "topologyId": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.Row": {
+ "required": [
+ "id",
+ "entries"
+ ],
+ "properties": {
+ "entries": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "id": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.Sample": {
+ "required": [
+ "date",
+ "value"
+ ],
+ "properties": {
+ "date": {
+ "type": "string",
+ "format": "date-time"
+ },
+ "value": {
+ "type": "number",
+ "format": "double"
+ }
+ }
+ },
+ "v1alpha2.Table": {
+ "required": [
+ "id",
+ "label",
+ "type",
+ "columns",
+ "rows"
+ ],
+ "properties": {
+ "columns": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Column"
+ }
+ },
+ "id": {
+ "type": "string"
+ },
+ "label": {
+ "type": "string"
+ },
+ "rows": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha2.Row"
+ }
+ },
+ "truncationCount": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "type": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.TopologyResponse": {
+ "required": [
+ "nodes"
+ ],
+ "properties": {
+ "nodes": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/v1alpha2.NodeSummary"
+ }
+ }
+ }
+ },
+ "v1alpha2.User": {
+ "required": [
+ "spec"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/v1alpha2.UserSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/v1alpha2.UserStatus"
+ }
+ }
+ },
+ "v1alpha2.UserSpec": {
+ "required": [
+ "email"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "displayName": {
+ "type": "string"
+ },
+ "email": {
+ "type": "string"
+ },
+ "groups": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "lang": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.UserStatus": {
+ "properties": {
+ "lastLoginTime": {
+ "type": "string"
+ },
+ "lastTransitionTime": {
+ "type": "string"
+ },
+ "reason": {
+ "type": "string"
+ },
+ "state": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha2.WorkspaceRole": {
+ "required": [
+ "rules"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "rules": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1.PolicyRule"
+ }
+ }
+ }
+ },
+ "v1alpha2.WorkspaceTemplate": {
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/v1beta1.FederatedWorkspaceSpec"
+ }
+ }
+ },
+ "v1alpha2.appHealthResponse": {
+ "required": [
+ "requests",
+ "workloadStatuses"
+ ],
+ "properties": {
+ "requests": {
+ "$ref": "#/definitions/models.RequestHealth"
+ },
+ "workloadStatuses": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/models.WorkloadStatus"
+ }
+ }
+ }
+ },
+ "v1alpha2.graphResponse": {
+ "required": [
+ "timestamp",
+ "duration",
+ "graphType",
+ "elements"
+ ],
+ "properties": {
+ "duration": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "elements": {
+ "$ref": "#/definitions/cytoscape.Elements"
+ },
+ "graphType": {
+ "type": "string"
+ },
+ "timestamp": {
+ "type": "integer",
+ "format": "int64"
+ }
+ }
+ },
+ "v1alpha2.metricsResponse": {
+ "required": [
+ "metrics",
+ "histograms"
+ ],
+ "properties": {
+ "histograms": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/prometheus.Metrics.histograms"
+ }
+ },
+ "metrics": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/prometheus.Metric"
+ }
+ }
+ }
+ },
+ "v1alpha2.namespaceAppHealthResponse": {
+ "required": [
+ "NamespaceAppHealth"
+ ],
+ "properties": {
+ "NamespaceAppHealth": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/models.AppHealth"
+ }
+ }
+ }
+ },
+ "v1alpha2.serviceHealthResponse": {
+ "required": [
+ "requests"
+ ],
+ "properties": {
+ "requests": {
+ "$ref": "#/definitions/models.RequestHealth"
+ }
+ }
+ },
+ "v1alpha2.workloadHealthResponse": {
+ "required": [
+ "requests",
+ "workloadStatus"
+ ],
+ "properties": {
+ "requests": {
+ "$ref": "#/definitions/models.RequestHealth"
+ },
+ "workloadStatus": {
+ "$ref": "#/definitions/models.WorkloadStatus"
+ }
+ }
+ },
+ "v1alpha3.BitbucketServerSource": {
+ "properties": {
+ "api_uri": {
+ "description": "The api url can specify the location of the github apiserver. For private cloud configuration",
+ "type": "string"
+ },
+ "credential_id": {
+ "description": "credential id to access github source",
+ "type": "string"
+ },
+ "discover_branches": {
+ "description": "Discover branch configuration",
+ "type": "integer",
+ "format": "int32"
+ },
+ "discover_pr_from_forks": {
+ "description": "Discover fork PR configuration",
+ "$ref": "#/definitions/v1alpha3.DiscoverPRFromForks"
+ },
+ "discover_pr_from_origin": {
+ "description": "Discover origin PR configuration",
+ "type": "integer",
+ "format": "int32"
+ },
+ "git_clone_option": {
+ "description": "advanced git clone options",
+ "$ref": "#/definitions/v1alpha3.GitCloneOption"
+ },
+ "owner": {
+ "description": "owner of github repo",
+ "type": "string"
+ },
+ "regex_filter": {
+ "description": "Regex used to match the name of the branch that needs to be run",
+ "type": "string"
+ },
+ "repo": {
+ "description": "repo name of github repo",
+ "type": "string"
+ },
+ "scm_id": {
+ "description": "uid of scm",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.DevOpsProject": {
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProjectSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/v1alpha3.DevOpsProjectStatus"
+ }
+ }
+ },
+ "v1alpha3.DevOpsProjectSpec": {},
+ "v1alpha3.DevOpsProjectStatus": {
+ "properties": {
+ "adminNamespace": {
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.DiscarderProperty": {
+ "properties": {
+ "days_to_keep": {
+ "description": "days to keep pipeline",
+ "type": "string"
+ },
+ "num_to_keep": {
+ "description": "number of pipelines to keep",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.DiscoverPRFromForks": {
+ "properties": {
+ "strategy": {
+ "description": "github discover strategy",
+ "type": "integer",
+ "format": "int32"
+ },
+ "trust": {
+ "description": "trust user type",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha3.GitCloneOption": {
+ "properties": {
+ "depth": {
+ "description": "git clone depth",
+ "type": "integer",
+ "format": "int32"
+ },
+ "shallow": {
+ "description": "Whether to use git shallow clone",
+ "type": "boolean"
+ },
+ "timeout": {
+ "description": "git clone timeout mins",
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ },
+ "v1alpha3.GitSource": {
+ "properties": {
+ "credential_id": {
+ "description": "credential id to access git source",
+ "type": "string"
+ },
+ "discover_branches": {
+ "description": "Whether to discover a branch",
+ "type": "boolean"
+ },
+ "git_clone_option": {
+ "description": "advanced git clone options",
+ "$ref": "#/definitions/v1alpha3.GitCloneOption"
+ },
+ "regex_filter": {
+ "description": "Regex used to match the name of the branch that needs to be run",
+ "type": "string"
+ },
+ "scm_id": {
+ "description": "uid of scm",
+ "type": "string"
+ },
+ "url": {
+ "description": "url of git source",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.GithubSource": {
+ "properties": {
+ "api_uri": {
+ "description": "The api url can specify the location of the github apiserver. For private cloud configuration",
+ "type": "string"
+ },
+ "credential_id": {
+ "description": "credential id to access github source",
+ "type": "string"
+ },
+ "discover_branches": {
+ "description": "Discover branch configuration",
+ "type": "integer",
+ "format": "int32"
+ },
+ "discover_pr_from_forks": {
+ "description": "Discover fork PR configuration",
+ "$ref": "#/definitions/v1alpha3.DiscoverPRFromForks"
+ },
+ "discover_pr_from_origin": {
+ "description": "Discover origin PR configuration",
+ "type": "integer",
+ "format": "int32"
+ },
+ "git_clone_option": {
+ "description": "advanced git clone options",
+ "$ref": "#/definitions/v1alpha3.GitCloneOption"
+ },
+ "owner": {
+ "description": "owner of github repo",
+ "type": "string"
+ },
+ "regex_filter": {
+ "description": "Regex used to match the name of the branch that needs to be run",
+ "type": "string"
+ },
+ "repo": {
+ "description": "repo name of github repo",
+ "type": "string"
+ },
+ "scm_id": {
+ "description": "uid of scm",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.MultiBranchJobTrigger": {
+ "properties": {
+ "create_action_job_to_trigger": {
+ "description": "pipeline name to trigger",
+ "type": "string"
+ },
+ "delete_action_job_to_trigger": {
+ "description": "pipeline name to trigger",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.MultiBranchPipeline": {
+ "required": [
+ "name",
+ "source_type",
+ "script_path"
+ ],
+ "properties": {
+ "bitbucket_server_source": {
+ "description": "bitbucket server scm define",
+ "$ref": "#/definitions/v1alpha3.BitbucketServerSource"
+ },
+ "description": {
+ "description": "description of pipeline",
+ "type": "string"
+ },
+ "discarder": {
+ "description": "Discarder of pipeline, managing when to drop a pipeline",
+ "$ref": "#/definitions/v1alpha3.DiscarderProperty"
+ },
+ "git_source": {
+ "description": "git scm define",
+ "$ref": "#/definitions/v1alpha3.GitSource"
+ },
+ "github_source": {
+ "description": "github scm define",
+ "$ref": "#/definitions/v1alpha3.GithubSource"
+ },
+ "multibranch_job_trigger": {
+ "description": "Pipeline tasks that need to be triggered upon branch creation/deletion",
+ "$ref": "#/definitions/v1alpha3.MultiBranchJobTrigger"
+ },
+ "name": {
+ "description": "name of pipeline",
+ "type": "string"
+ },
+ "script_path": {
+ "description": "script path in scm",
+ "type": "string"
+ },
+ "single_svn_source": {
+ "description": "single branch svn scm define",
+ "$ref": "#/definitions/v1alpha3.SingleSvnSource"
+ },
+ "source_type": {
+ "description": "type of scm, such as github/git/svn",
+ "type": "string"
+ },
+ "svn_source": {
+ "description": "multi branch svn scm define",
+ "$ref": "#/definitions/v1alpha3.SvnSource"
+ },
+ "timer_trigger": {
+ "description": "Timer to trigger pipeline run",
+ "$ref": "#/definitions/v1alpha3.TimerTrigger"
+ }
+ }
+ },
+ "v1alpha3.NoScmPipeline": {
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "description": {
+ "description": "description of pipeline",
+ "type": "string"
+ },
+ "disable_concurrent": {
+ "description": "Whether to prohibit the pipeline from running in parallel",
+ "type": "boolean"
+ },
+ "discarder": {
+ "description": "Discarder of pipeline, managing when to drop a pipeline",
+ "$ref": "#/definitions/v1alpha3.DiscarderProperty"
+ },
+ "jenkinsfile": {
+ "description": "Jenkinsfile's content",
+ "type": "string"
+ },
+ "name": {
+ "description": "name of pipeline",
+ "type": "string"
+ },
+ "parameters": {
+ "description": "Parameter definitions of the pipeline; users can pass parameters when running the pipeline",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1alpha3.Parameter"
+ }
+ },
+ "remote_trigger": {
+ "description": "Remote api define to trigger pipeline run",
+ "$ref": "#/definitions/v1alpha3.RemoteTrigger"
+ },
+ "timer_trigger": {
+ "description": "Timer to trigger pipeline run",
+ "$ref": "#/definitions/v1alpha3.TimerTrigger"
+ }
+ }
+ },
+ "v1alpha3.Parameter": {
+ "required": [
+ "name",
+ "type"
+ ],
+ "properties": {
+ "default_value": {
+ "description": "default value of param",
+ "type": "string"
+ },
+ "description": {
+ "description": "description of param",
+ "type": "string"
+ },
+ "name": {
+ "description": "name of param",
+ "type": "string"
+ },
+ "type": {
+ "description": "type of param",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.Pipeline": {
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/v1alpha3.PipelineSpec"
+ },
+ "status": {
+ "$ref": "#/definitions/v1alpha3.PipelineStatus"
+ }
+ }
+ },
+ "v1alpha3.PipelineSpec": {
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "multi_branch_pipeline": {
+ "description": "in scm pipeline structs",
+ "$ref": "#/definitions/v1alpha3.MultiBranchPipeline"
+ },
+ "pipeline": {
+ "description": "no scm pipeline structs",
+ "$ref": "#/definitions/v1alpha3.NoScmPipeline"
+ },
+ "type": {
+ "description": "type of devops pipeline, in scm or no scm",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.PipelineStatus": {},
+ "v1alpha3.RemoteTrigger": {
+ "properties": {
+ "token": {
+ "description": "remote trigger token",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.SingleSvnSource": {
+ "properties": {
+ "credential_id": {
+ "description": "credential id to access svn source",
+ "type": "string"
+ },
+ "remote": {
+ "description": "remote address url",
+ "type": "string"
+ },
+ "scm_id": {
+ "description": "uid of scm",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.SvnSource": {
+ "properties": {
+ "credential_id": {
+ "description": "credential id to access svn source",
+ "type": "string"
+ },
+ "excludes": {
+ "description": "branches do not run pipeline",
+ "type": "string"
+ },
+ "includes": {
+ "description": "branches to run pipeline",
+ "type": "string"
+ },
+ "remote": {
+ "description": "remote address url",
+ "type": "string"
+ },
+ "scm_id": {
+ "description": "uid of scm",
+ "type": "string"
+ }
+ }
+ },
+ "v1alpha3.TimerTrigger": {
+ "properties": {
+ "cron": {
+ "description": "jenkins cron script",
+ "type": "string"
+ },
+ "interval": {
+ "description": "interval ms",
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.ClusterOverride": {
+ "required": [
+ "path"
+ ],
+ "properties": {
+ "op": {
+ "type": "string"
+ },
+ "path": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.FederatedWorkspaceSpec": {
+ "required": [
+ "template",
+ "placement"
+ ],
+ "properties": {
+ "overrides": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.GenericOverrideItem"
+ }
+ },
+ "placement": {
+ "$ref": "#/definitions/v1beta1.GenericPlacementFields"
+ },
+ "template": {
+ "$ref": "#/definitions/v1beta1.WorkspaceTemplate"
+ }
+ }
+ },
+ "v1beta1.GenericClusterReference": {
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.GenericOverrideItem": {
+ "required": [
+ "clusterName"
+ ],
+ "properties": {
+ "clusterName": {
+ "type": "string"
+ },
+ "clusterOverrides": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.ClusterOverride"
+ }
+ }
+ }
+ },
+ "v1beta1.GenericPlacementFields": {
+ "properties": {
+ "clusterSelector": {
+ "$ref": "#/definitions/v1.LabelSelector"
+ },
+ "clusters": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.GenericClusterReference"
+ }
+ }
+ }
+ },
+ "v1beta1.HTTPIngressPath": {
+ "description": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.",
+ "required": [
+ "backend"
+ ],
+ "properties": {
+ "backend": {
+ "description": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
+ "$ref": "#/definitions/v1beta1.IngressBackend"
+ },
+ "path": {
+ "description": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.",
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.HTTPIngressRuleValue": {
+ "description": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.",
+ "required": [
+ "paths"
+ ],
+ "properties": {
+ "paths": {
+ "description": "A collection of paths that map requests to backends.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.HTTPIngressPath"
+ }
+ }
+ }
+ },
+ "v1beta1.Ingress": {
+ "description": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "description": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1beta1.IngressSpec"
+ },
+ "status": {
+ "description": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "$ref": "#/definitions/v1beta1.IngressStatus"
+ }
+ }
+ },
+ "v1beta1.IngressBackend": {
+ "description": "IngressBackend describes all endpoints for a given service and port.",
+ "required": [
+ "serviceName",
+ "servicePort"
+ ],
+ "properties": {
+ "serviceName": {
+ "description": "Specifies the name of the referenced service.",
+ "type": "string"
+ },
+ "servicePort": {
+ "description": "Specifies the port of the referenced service.",
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.IngressRule": {
+ "description": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.",
+ "properties": {
+ "host": {
+ "description": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.",
+ "type": "string"
+ },
+ "http": {
+ "$ref": "#/definitions/v1beta1.HTTPIngressRuleValue"
+ }
+ }
+ },
+ "v1beta1.IngressSpec": {
+ "description": "IngressSpec describes the Ingress the user wishes to exist.",
+ "properties": {
+ "backend": {
+ "description": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
+ "$ref": "#/definitions/v1beta1.IngressBackend"
+ },
+ "rules": {
+ "description": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.IngressRule"
+ }
+ },
+ "tls": {
+ "description": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/v1beta1.IngressTLS"
+ }
+ }
+ }
+ },
+ "v1beta1.IngressStatus": {
+ "description": "IngressStatus describe the current state of the Ingress.",
+ "properties": {
+ "loadBalancer": {
+ "description": "LoadBalancer contains the current status of the load-balancer.",
+ "$ref": "#/definitions/v1.LoadBalancerStatus"
+ }
+ }
+ },
+ "v1beta1.IngressTLS": {
+ "description": "IngressTLS describes the transport layer security associated with an Ingress.",
+ "properties": {
+ "hosts": {
+ "description": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "secretName": {
+ "description": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.",
+ "type": "string"
+ }
+ }
+ },
+ "v1beta1.WorkspaceTemplate": {
+ "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ "properties": {
+ "metadata": {
+ "$ref": "#/definitions/v1.ObjectMeta"
+ },
+ "spec": {
+ "$ref": "#/definitions/v1alpha1.WorkspaceSpec"
+ }
+ }
+ }
+ },
+ "securityDefinitions": {
+ "jwt": {
+ "type": "apiKey",
+ "name": "Authorization",
+ "in": "header"
+ }
+ },
+ "security": [
+ {
+ "jwt": []
+ }
+ ]
+}
\ No newline at end of file