diff --git a/content/en/docs/v4.1/02-quickstart/03-control-user-permissions.adoc b/content/en/docs/v4.1/02-quickstart/03-control-user-permissions.adoc
index 3a33e2979..d304fcf6e 100644
--- a/content/en/docs/v4.1/02-quickstart/03-control-user-permissions.adoc
+++ b/content/en/docs/v4.1/02-quickstart/03-control-user-permissions.adoc
@@ -6,8 +6,15 @@ description: "Learn how to create users and control their permissions by roles i
 weight: 03
 ---
 
+ifeval::["{file_output_type}" == "html"]
 This section explains how to create users and control their permissions by roles in workspaces and projects.
 For more information on permission control, please refer to link:../../05-users-and-roles/[Users and Roles].
+endif::[]
+
+ifeval::["{file_output_type}" == "pdf"]
+This section explains how to create users and control their permissions by roles in workspaces and projects.
+For more information on permission control, please refer to {ks_product-en} Users and Roles.
+endif::[]
 
 As a multi-tenant system, KubeSphere supports controlling user permissions based on roles at the platform, cluster, workspace, and project levels, achieving logical resource isolation.
 
diff --git a/content/en/docs/v4.1/03-installation-and-upgrade/02-install-kubesphere/02-install-kubernetes-and-kubesphere.adoc b/content/en/docs/v4.1/03-installation-and-upgrade/02-install-kubesphere/02-install-kubernetes-and-kubesphere.adoc
index e178972aa..e3903bd51 100644
--- a/content/en/docs/v4.1/03-installation-and-upgrade/02-install-kubesphere/02-install-kubernetes-and-kubesphere.adoc
+++ b/content/en/docs/v4.1/03-installation-and-upgrade/02-install-kubesphere/02-install-kubernetes-and-kubesphere.adoc
@@ -210,53 +210,53 @@ Kind: Cluster
 metadata:
   name: sample
 spec:
-  hosts:
-  - {name: controlplane1, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 23, user: ubuntu, password: Testing123, arch: arm64} # For arm64 nodes, please add the parameter arch: arm64
-  - {name: controlplane2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, privateKeyPath: "~/.ssh/id_rsa"}
-  - {name: worker1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123}
-  - {name: worker2, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123}
-  - {name: registry, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123}
-  roleGroups:
-    etcd:
-    - controlplane1
-    - controlplane2
-    control-plane:
-    - controlplane1
-    - controlplane2
-    worker:
-    - worker1
-    - worker2
-    # If you want to use kk to automatically deploy the image registry, please set up the registry (it is recommended that the image registry and cluster nodes be deployed separately to reduce mutual influence)
-    registry:
-    -registry
-  controlPlaneEndpoint:
-    internalLoadbalancer: haproxy # If you need to deploy a high availability cluster and no load balancer is available, you can enable this parameter to perform load balancing within the cluster.
-    domain: lb.kubesphere.local
-    address: ""
-    port: 6443
-  kubernetes:
-    version: v1.23.15
-    clusterName: cluster.local
-  network:
-    plugin: calico
-    kubePodsCIDR: 10.233.64.0/18
-    kubeServiceCIDR: 10.233.0.0/18
-    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
-    enableMultusCNI: false
-  registry:
-    # If you want to use kk to deploy harbor, you can set this parameter to harbor. If you do not set this parameter and you need to use kk to deploy the image registry, docker registry will be deployed by default.
-    # Harbor does not support arm64. This parameter does not need to be configured when deploying in an arm64 environment.
-    type: harbor
-    # If you use kk to deploy harbor or other registries that require authentication, you need to set the auths of the corresponding registries. If you use kk to deploy the default docker registry, you do not need to configure the auths parameter.
-    auths:
-      "dockerhub.kubekey.local":
-        username: admin # harbor default username
-        password: Harbor12345 # harbor default password
-        plainHTTP: false # If the registry uses http, please set this parameter to true
-    privateRegistry: "dockerhub.kubekey.local/kse" #Set the private registry address used during cluster deployment
-    registryMirrors: []
-    insecureRegistries: []
-  addons: []
+  hosts:
+  - {name: controlplane1, address: 192.168.0.2, internalAddress: 192.168.0.2, port: 23, user: ubuntu, password: Testing123, arch: arm64} # For arm64 nodes, please add the parameter arch: arm64
+  - {name: controlplane2, address: 192.168.0.3, internalAddress: 192.168.0.3, user: ubuntu, privateKeyPath: "~/.ssh/id_rsa"}
+  - {name: worker1, address: 192.168.0.4, internalAddress: 192.168.0.4, user: ubuntu, password: Testing123}
+  - {name: worker2, address: 192.168.0.5, internalAddress: 192.168.0.5, user: ubuntu, password: Testing123}
+  - {name: registry, address: 192.168.0.6, internalAddress: 192.168.0.6, user: ubuntu, password: Testing123}
+  roleGroups:
+    etcd:
+    - controlplane1
+    - controlplane2
+    control-plane:
+    - controlplane1
+    - controlplane2
+    worker:
+    - worker1
+    - worker2
+    # If you want to use kk to automatically deploy the image registry, please set up the registry (it is recommended that the image registry and cluster nodes be deployed separately to reduce mutual influence)
+    registry:
+    - registry
+  controlPlaneEndpoint:
+    internalLoadbalancer: haproxy # If you need to deploy a high availability cluster and no load balancer is available, you can enable this parameter to perform load balancing within the cluster.
+    domain: lb.kubesphere.local
+    address: ""
+    port: 6443
+  kubernetes:
+    version: v1.23.15
+    clusterName: cluster.local
+  network:
+    plugin: calico
+    kubePodsCIDR: 10.233.64.0/18
+    kubeServiceCIDR: 10.233.0.0/18
+    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
+    enableMultusCNI: false
+  registry:
+    # If you want to use kk to deploy harbor, you can set this parameter to harbor. If you do not set this parameter and you need to use kk to deploy the image registry, docker registry will be deployed by default.
+    # Harbor does not support arm64. This parameter does not need to be configured when deploying in an arm64 environment.
+    type: harbor
+    # If you use kk to deploy harbor or other registries that require authentication, you need to set the auths of the corresponding registries. If you use kk to deploy the default docker registry, you do not need to configure the auths parameter.
+    auths:
+      "dockerhub.kubekey.local":
+        username: admin # harbor default username
+        password: Harbor12345 # harbor default password
+        plainHTTP: false # If the registry uses http, please set this parameter to true
+    privateRegistry: "dockerhub.kubekey.local/kse" # Set the private registry address used during cluster deployment
+    registryMirrors: []
+    insecureRegistries: []
+  addons: []
 ----
 --
diff --git a/content/en/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc b/content/en/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
index cb535e77c..7a1e146f6 100644
--- a/content/en/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
+++ b/content/en/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
@@ -16,7 +16,7 @@ After configuring external identity providers, users can log in to the {ks_produ
 
 . Log in to the {ks_product-en} web console with a user having the **platform-admin** role.
 
-. Navigate to the project **kubesphere-system** under the workspace **system-workspace**.
+. Click **Cluster Management** and then enter the **host** cluster.
 +
 
diff --git a/content/en/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc b/content/en/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
index c22dda784..7d0c668a3 100644
--- a/content/en/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
+++ b/content/en/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
@@ -20,8 +20,13 @@ Depending on your network environment, the host cluster and member clusters can
 | Note
 |
 
-To use a agent connection, the **KubeSphere Multi-Cluster Agent Connection** extension needs to be installed and enabled on the KubeSphere platform.
-// For more information, refer to link:../../../../11-use-extensions/19-tower/02-add-a-member-cluster-using-proxy-connection[Add a Member Cluster via Agent Connection].
+ifeval::["{file_output_type}" == "html"]
+To use an agent connection, the **KubeSphere Multi-Cluster Agent Connection** extension needs to be installed and enabled on the {ks_product-en} platform. For more information, refer to link:../../../../11-use-extensions/19-tower/02-add-a-member-cluster-using-proxy-connection[Add a Member Cluster via Agent Connection].
+endif::[]
+
+ifeval::["{file_output_type}" == "pdf"]
+To use an agent connection, the **KubeSphere Multi-Cluster Agent Connection** extension needs to be installed and enabled on the {ks_product-en} platform. For more information, refer to the "KubeSphere Multi-Cluster Agent Connection" section in the {ks_product-en} Extension User Guide.
+endif::[]
 |===
 
 Whether using a direct connection or a agent connection, at least one of the host cluster and the member cluster must be able to access the services exposed by the other side.
\ No newline at end of file
diff --git a/content/en/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc b/content/en/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
index 106fd7613..cdd64be36 100644
--- a/content/en/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
+++ b/content/en/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
@@ -22,7 +22,7 @@ include::../../../_custom-en/platformManagement/platformManagement-oper-logIn.ad
 . Click **Workspace Management**.
 +
 --
-* The workspace list displays all workspaces on the KubeSphere platform.
+* The workspace list displays all workspaces on the {ks_product-en} platform.
 
 * In the workspace list, click the name of a workspace to view and manage resources within it.
 --
diff --git a/content/en/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc b/content/en/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
index bd73d3cdb..92e0bf279 100644
--- a/content/en/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
+++ b/content/en/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
@@ -5,48 +5,33 @@ description: "Learn how to view the built-in Dashboards provided by the extensio
 weight: 01
 ---
 
-The Grafana for WhizardTelemetry extension comes with multiple Grafana Dashboards that allow direct querying of monitoring data for Kubernetes and KubeSphere without the need for manual configuration of Grafana Dashboards.
+The Grafana for WhizardTelemetry extension comes with multiple Grafana Dashboards that allow direct querying of monitoring data for Kubernetes without the need for manual configuration of Grafana Dashboards.
 
 == Steps
 
-. After logging into the Grafana console, click **Dashboards** in the left navigation pane to view all built-in Dashboard templates, which are in four directories: `aicp`, `kube-prometheus-stack`, `whizard-loki`, and `whizard-monitoring`.
+. After logging into the Grafana console, click **Dashboards** in the left navigation pane to view all built-in Dashboard templates.
 +
 --
-image:/images/ks-qkcp/zh/v4.1.2/grafana/dashboard-list.png[dashboard-list]
 
 [%header,cols="1a,3a"]
 |===
 |Directory
 |Description
-|aicp
-|Used for QingCloud AI Computing Platform, please view monitoring panels in the "AI Computing Management" platform.
-
 |kube-prometheus-stack
 |Visualizes monitoring data for Kubernetes.
-
-|whizard-loki
-|Visualizes logs, audits, events, and notification history of KubeSphere stored in Loki.
-
-|whizard-monitoring
-|Multi-cluster monitoring adapted for Whizard and KubeSphere.
 |===
 
 [.admon.attention,cols="a"]
 |===
-|Note
+|Attention
 |
-* After installing the **WhizardTelemetry Monitoring** extension, the Dashboards in **kube-prometheus-stack** and **whizard-monitoring** will display monitoring data.
-* To display monitoring data in the Dashboards of **whizard-loki**, see link:../../17-loki/01-display-loki-data[Grafana Loki for WhizardTelemetry].
+After installing the **WhizardTelemetry Monitoring** extension, the Dashboards in **kube-prometheus-stack** will display monitoring data.
 |===
 --
 
 . Click on a Dashboard template in the directory to view the corresponding monitoring data.
 +
-Below is an example using the **KubeSphere Nodes** template from the **whizard-monitoring** directory to introduce the Dashboard page.
-
-. The **KubeSphere Nodes** dashboard displays monitoring information for each node, including resource utilization of CPU, memory, disk, and pods, disk IOPS, disk throughput, network bandwidth, etc.
-+
 image:/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png[node-dashboard]
 
 . Click **data source**, **cluster**, and **node** at the top to select data from specified sources, clusters, and nodes.
diff --git a/content/zh/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc b/content/zh/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
index 0e8ff2426..ed5eccb46 100644
--- a/content/zh/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
+++ b/content/zh/docs/v4.1/04-platform-management/03-platform-settings/03-external-authentication/01-set-up-external-authentication.adoc
@@ -16,7 +16,7 @@ weight: 01
 
 . 以具有 **platform-admin** 角色的用户登录{ks_product_left} Web 控制台。
 
-. 进入企业空间 **system-workspace** 下的项目 **kubesphere-system**。
+. 点击**集群管理**,并进入 host 集群。
 +
 
diff --git a/content/zh/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc b/content/zh/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
index 3eb88c777..b0af8c95a 100644
--- a/content/zh/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
+++ b/content/zh/docs/v4.1/07-cluster-management/10-multi-cluster-management/01-clusters/01-add-a-member-cluster/_index.adoc
@@ -22,7 +22,13 @@ weight: 01
 |说明
 |
 
+ifeval::["{file_output_type}" == "html"]
 若要使用代理连接,{ks_product_both}平台需要安装并启用 **KubeSphere 多集群代理连接**扩展组件。有关更多信息,请参阅link:../../../../11-use-extensions/19-tower/02-add-a-member-cluster-using-proxy-connection/[通过代理连接添加成员集群]。
+endif::[]
+
+ifeval::["{file_output_type}" == "pdf"]
+若要使用代理连接,{ks_product_both}平台需要安装并启用 **KubeSphere 多集群代理连接**扩展组件。有关更多信息,请参阅《{ks_product_right}扩展组件使用指南》的“KubeSphere 多集群代理连接”章节。
+endif::[]
 
 |===
diff --git a/content/zh/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc b/content/zh/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
index cff18bf99..ce48ce165 100644
--- a/content/zh/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
+++ b/content/zh/docs/v4.1/08-workspace-management/02-workspaces/02-view-a-workspace-list.adoc
@@ -22,7 +22,7 @@ include::../../../_custom/platformManagement/platformManagement-oper-logIn.adoc[
 . 点击**企业空间管理**。
 +
 --
-* 企业空间列表显示当前 KubeSphere 平台的所有企业空间。
+* 企业空间列表显示当前{ks_product_both}平台的所有企业空间。
 
 * 在企业空间列表中,点击企业空间的名称可进入企业空间,查看和管理企业空间中的资源。
 --
diff --git a/content/zh/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc b/content/zh/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
index edd37027a..b58e0700e 100644
--- a/content/zh/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
+++ b/content/zh/docs/v4.1/11-use-extensions/05-observability-platform/16-grafana/01-built-in-dashboards.adoc
@@ -5,30 +5,19 @@ description: "介绍如何查看扩展组件提供的内置 Dashboard。"
 weight: 01
 ---
 
-Grafana for WhizardTelemetry 扩展组件内置了多个 Grafana Dashboard 模板,可供直接查询 Kubernetes 和 KubeSphere 的监控数据,而无需自行配置 Grafana Dashboard。
+Grafana for WhizardTelemetry 扩展组件内置了多个 Grafana Dashboard 模板,可供直接查询 Kubernetes 的监控数据,而无需自行配置 Grafana Dashboard。
 
 == 操作步骤
 
-. 登录 Grafana 控制台后,点击左侧导航栏的 **Dashboards**,查看所有内置的 Dashboard 模板。包含 aicp, kube-prometheus-stack, whizard-loki, whizard-monitoring 4 个目录。
+. 登录 Grafana 控制台后,点击左侧导航栏的 **Dashboards**,查看所有内置的 Dashboard 模板。
 +
 --
-image:/images/ks-qkcp/zh/v4.1/grafana/dashboard-list.png[dashboard-list]
-
 [%header,cols="1a,3a"]
 |===
 |目录
 |模板介绍
-|aicp
-|用于青云 AI 智算运维管理端,需在“AI 智算管理”平台中查看监控面板。
-
 |kube-prometheus-stack
 |可视化 Kubernetes 的监控数据。
-
-|whizard-loki
-|可视化存储到 Loki 的 KubeSphere 日志、审计、事件及通知历史。
-
-|whizard-monitoring
-|适配 Whizard 与 KubeSphere 后的多集群监控。
 |===
 
 [.admon.attention,cols="a"]
@@ -36,19 +25,12 @@ image:/images/ks-qkcp/zh/v4.1/grafana/dashboard-list.png[dashboard-list]
 |注意
 |
-* 安装 **WhizardTelemetry 监控**扩展组件后,**kube-prometheus-stack** 和 **whizard-monitoring** 中的 Dashboard 才会显示监控数据。
-* 若要 **whizard-loki** 中的 Dashboard 显示监控数据,请参阅link:../../17-loki/01-display-loki-data[
-Grafana Loki for WhizardTelemetry]。
+安装 **WhizardTelemetry 监控**扩展组件后,**kube-prometheus-stack** 中的 Dashboard 才会显示监控数据。
 |===
 --
 
 . 点击目录中的 Dashboard 模板,查看对应指标的监控数据。
 +
-下面以 **whizard-monitoring** 中的 **KubeSphere Nodes** 模板为例,介绍 Dashboard 页面。
-
-
-. **KubeSphere Nodes** 看板展示了每个节点的 CPU、内存、磁盘和 pod 的资源利用率、磁盘 IOPS、磁盘吞吐量、网络带宽等监控信息。
-+
 image:/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png[node-dashboard]
 
 . 点击上方的 **data source**、**cluster**、**node**,可选择查看指定数据源、集群和节点的相关数据。
diff --git a/static/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png b/static/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png
index 9724041b9..ff2567f91 100644
Binary files a/static/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png and b/static/images/ks-qkcp/zh/v4.1.2/grafana/node-dashboard.png differ
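
The html/pdf branching added across the files above all follows one AsciiDoc pattern: `ifeval` preprocessor conditionals keyed on the `file_output_type` attribute. Below is a minimal sketch of that pattern, assuming the build pipeline sets `file_output_type` to either `html` or `pdf` as these files expect; the sample sentences and the "section of the guide" wording are illustrative, not taken from any specific page.

[source,asciidoc]
----
// Rendered only in the HTML build, where relative link: macros resolve.
ifeval::["{file_output_type}" == "html"]
For more information, refer to link:../../05-users-and-roles/[Users and Roles].
endif::[]

// Rendered only in the PDF build, where cross-file links would break,
// so the reference is written out as plain text instead.
ifeval::["{file_output_type}" == "pdf"]
For more information, refer to the "Users and Roles" section of the guide.
endif::[]
----

This is the same gating used in the hunks above: the HTML branch keeps clickable `link:` macros, while the PDF branch falls back to a plain-text reference to the printed guide.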